aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSaoirse Stewart <saoirse.stewart@arm.com>2019-02-19 15:54:14 +0000
committerSaoirse Stewart Arm <saoirse.stewart@arm.com>2019-02-19 15:55:46 +0000
commit263829c2163d79a28f98f24f9dd1e52e1c3cbbef (patch)
treebd904ce4b8aeaa14bc0622bbacefda26011733f2
parent4fbae33571871ce584e421657e8ffba299e89d67 (diff)
downloadarmnn-263829c2163d79a28f98f24f9dd1e52e1c3cbbef.tar.gz
IVGCVSW-2642 Add Reshape to Serializer and Deserializer
Change-Id: Iccded3c6e3c0713c44f43231981440420591f94e
Signed-off-by: Saoirse Stewart <saoirse.stewart@arm.com>
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
-rw-r--r--CMakeLists.txt1
-rw-r--r--src/armnnDeserializeParser/DeserializeParser.cpp100
-rw-r--r--src/armnnDeserializeParser/DeserializeParser.hpp3
-rw-r--r--src/armnnDeserializeParser/DeserializerSupport.md1
-rw-r--r--src/armnnDeserializeParser/test/DeserializeReshape.cpp128
-rw-r--r--src/armnnSerializer/Schema.fbs13
-rw-r--r--src/armnnSerializer/Serializer.cpp26
-rw-r--r--src/armnnSerializer/Serializer.hpp4
-rw-r--r--src/armnnSerializer/SerializerSupport.md1
-rw-r--r--src/armnnSerializer/test/SerializerTests.cpp61
10 files changed, 335 insertions, 3 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 47c0ae039b..967ffb1144 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -557,6 +557,7 @@ if(BUILD_UNIT_TESTS)
src/armnnDeserializeParser/test/DeserializeAdd.cpp
src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
src/armnnDeserializeParser/test/DeserializePooling2d.cpp
+ src/armnnDeserializeParser/test/DeserializeReshape.cpp
src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp
src/armnnDeserializeParser/test/SchemaSerialize.s
)
diff --git a/src/armnnDeserializeParser/DeserializeParser.cpp b/src/armnnDeserializeParser/DeserializeParser.cpp
index f47c23f0b5..de9b1a98c7 100644
--- a/src/armnnDeserializeParser/DeserializeParser.cpp
+++ b/src/armnnDeserializeParser/DeserializeParser.cpp
@@ -23,6 +23,9 @@
#include <Schema_generated.h>
#include <fstream>
+#include <algorithm>
+#include <limits>
+#include <numeric>
using armnn::ParseException;
using namespace armnn;
@@ -128,6 +131,25 @@ void CheckTensorPtr(DeserializeParser::TensorRawPtr rawPtr,
CheckGraph(GRAPH, LAYERS_INDEX, CHECK_LOCATION())
}
+bool CheckShape(const armnn::TensorShape& actual, const std::vector<uint32_t>& expected)
+{
+ const unsigned int actualSize = actual.GetNumDimensions();
+ if (actualSize != expected.size())
+ {
+ return false;
+ }
+
+ for (unsigned int i = 0u; i < actualSize; i++)
+ {
+ if (actual[i] != static_cast<unsigned int>(expected[i]))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
DeserializeParser::DeserializeParser()
: m_Network(nullptr, nullptr),
//May require LayerType_Max to be included
@@ -137,6 +159,7 @@ m_ParserFunctions(Layer_MAX+1, &DeserializeParser::ParseUnsupportedLayer)
m_ParserFunctions[Layer_AdditionLayer] = &DeserializeParser::ParseAdd;
m_ParserFunctions[Layer_MultiplicationLayer] = &DeserializeParser::ParseMultiplication;
m_ParserFunctions[Layer_Pooling2dLayer] = &DeserializeParser::ParsePooling2d;
+ m_ParserFunctions[Layer_ReshapeLayer] = &DeserializeParser::ParseReshape;
m_ParserFunctions[Layer_SoftmaxLayer] = &DeserializeParser::ParseSoftmax;
}
@@ -156,6 +179,8 @@ DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPt
return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
case Layer::Layer_Pooling2dLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->base();
+ case Layer::Layer_ReshapeLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
case Layer::Layer_SoftmaxLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_SoftmaxLayer()->base();
case Layer::Layer_NONE:
@@ -247,12 +272,12 @@ DeserializeParser::LayerBaseRawPtrVector DeserializeParser::GetGraphOutputs(cons
{
CHECK_GRAPH(graphPtr, 0);
const auto& numOutputs = graphPtr->outputIds()->size();
-
LayerBaseRawPtrVector result(numOutputs);
for (unsigned int i=0; i<numOutputs; ++i)
{
uint32_t outputId = graphPtr->outputIds()->Get(i);
+
result[i] = GetBaseLayer(graphPtr, static_cast<uint32_t>(outputId));
}
return result;
@@ -726,6 +751,79 @@ void DeserializeParser::ParsePooling2d(unsigned int layerIndex)
RegisterOutputSlots(layerIndex, layer);
}
+armnn::TensorInfo DeserializeParser::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
+ const std::vector<uint32_t>& targetDimsIn)
+{
+ std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
+ const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
+
+ if (stretchDim != targetDimsIn.end())
+ {
+ if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
+ {
+ throw ParseException(boost::str(
+ boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
+ }
+
+ auto targetNumElements =
+ boost::numeric_cast<unsigned int>(
+ std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
+
+ auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
+ outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
+ }
+
+ TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
+
+ armnn::TensorInfo reshapeInfo = inputTensorInfo;
+ reshapeInfo.SetShape(outputShape);
+
+ return reshapeInfo;
+}
+
+void DeserializeParser::ParseReshape(unsigned int layerIndex)
+{
+ CHECK_LAYERS(m_Graph, 0, layerIndex);
+ auto inputs = GetInputs(m_Graph, layerIndex);
+
+ auto outputs = GetOutputs(m_Graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+ armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
+
+ const auto targetDims = m_Graph->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->descriptor()->targetShape();
+ std::vector<uint32_t> outputDims(targetDims->begin(), targetDims->begin() + targetDims->size());
+
+ armnn::TensorInfo reshapeOutputTensorInfo = DeserializeParser::OutputShapeOfReshape(inputTensorInfo, outputDims);
+ const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
+
+ const std::vector<uint32_t> expectedDims(outputs[0]->dimensions()->begin(),
+ outputs[0]->dimensions()->begin() + outputs[0]->dimensions()->size());
+
+ if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, expectedDims))
+ {
+ std::stringstream ss;
+ ss << "New shape defined in reshape parameters "
+ << reshapeOutputTensorShape
+ << " does not equal output shape "
+ << actualOutputTensorInfo.GetShape()
+ << ": "
+ << CHECK_LOCATION().AsString();
+ throw ParseException(ss.str());
+ }
+
+ armnn::ReshapeDescriptor reshapeDesc;
+ reshapeDesc.m_TargetShape = reshapeOutputTensorShape;
+
+ auto layerName = boost::str(boost::format("Reshape:%1%") % layerIndex);
+ IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
+ layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
+
+ RegisterInputSlots(layerIndex, layer);
+ RegisterOutputSlots(layerIndex, layer);
+}
+
void DeserializeParser::ParseSoftmax(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
diff --git a/src/armnnDeserializeParser/DeserializeParser.hpp b/src/armnnDeserializeParser/DeserializeParser.hpp
index 1edb5a9f23..666cbca33c 100644
--- a/src/armnnDeserializeParser/DeserializeParser.hpp
+++ b/src/armnnDeserializeParser/DeserializeParser.hpp
@@ -53,6 +53,8 @@ public:
static int32_t GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex);
armnn::Pooling2dDescriptor GetPoolingDescriptor(PoolingDescriptor pooling2dDescriptor,
unsigned int layerIndex);
+ static armnn::TensorInfo OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
+ const std::vector<uint32_t> & targetDimsIn);
private:
// No copying allowed until it is wanted and properly implemented
@@ -69,6 +71,7 @@ private:
void ParseAdd(unsigned int layerIndex);
void ParseMultiplication(unsigned int layerIndex);
void ParsePooling2d(unsigned int layerIndex);
+ void ParseReshape(unsigned int layerIndex);
void ParseSoftmax(unsigned int layerIndex);
void RegisterOutputSlotOfConnection(uint32_t connectionIndex, armnn::IOutputSlot* slot);
diff --git a/src/armnnDeserializeParser/DeserializerSupport.md b/src/armnnDeserializeParser/DeserializerSupport.md
index d4925cc0ad..c03471af75 100644
--- a/src/armnnDeserializeParser/DeserializerSupport.md
+++ b/src/armnnDeserializeParser/DeserializerSupport.md
@@ -8,6 +8,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
* Addition
* Multiplication
+* Reshape
* Softmax
More machine learning layers will be supported in future releases.
diff --git a/src/armnnDeserializeParser/test/DeserializeReshape.cpp b/src/armnnDeserializeParser/test/DeserializeReshape.cpp
new file mode 100644
index 0000000000..21e60933f6
--- /dev/null
+++ b/src/armnnDeserializeParser/test/DeserializeReshape.cpp
@@ -0,0 +1,128 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../DeserializeParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(DeserializeParser)
+
+struct ReshapeFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit ReshapeFixture(const std::string &inputShape,
+ const std::string &targetShape,
+ const std::string &outputShape,
+ const std::string &dataType)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(
+ }}]
+ }
+ }}},
+ {
+ layer_type: "ReshapeLayer",
+ layer: {
+ base: {
+ index: 1,
+ layerName: "ReshapeLayer",
+ layerType: "Reshape",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(
+
+ }}]},
+ descriptor: {
+ targetShape: )" + targetShape + R"(,
+ }
+
+ }},
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 2,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }}},
+ }]
+ }
+ )";
+ SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+ }
+};
+
+struct SimpleReshapeFixture : ReshapeFixture
+{
+ SimpleReshapeFixture() : ReshapeFixture("[ 1, 9 ]", "[ 3, 3 ]", "[ 3, 3 ]",
+ "QuantisedAsymm8") {}
+};
+
+struct SimpleReshapeFixture2 : ReshapeFixture
+{
+ SimpleReshapeFixture2() : ReshapeFixture("[ 2, 2, 1, 1 ]",
+ "[ 2, 2, 1, 1 ]",
+ "[ 2, 2, 1, 1 ]",
+ "Float32") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ReshapeQuantisedAsymm8, SimpleReshapeFixture)
+{
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+}
+
+BOOST_FIXTURE_TEST_CASE(ReshapeFloat32, SimpleReshapeFixture2)
+{
+ RunTest<4, armnn::DataType::Float32>(0,
+ { 111, 85, 226, 3 },
+ { 111, 85, 226, 3 });
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnSerializer/Schema.fbs b/src/armnnSerializer/Schema.fbs
index 048181a2b1..2b96ad8de1 100644
--- a/src/armnnSerializer/Schema.fbs
+++ b/src/armnnSerializer/Schema.fbs
@@ -73,7 +73,8 @@ enum LayerType : uint {
Multiplication = 2,
Output = 3,
Pooling2d = 4,
- Softmax = 5
+ Reshape = 5,
+ Softmax = 6
}
// Base layer table to be used as part of other layers
@@ -152,12 +153,22 @@ table OutputLayer {
base:BindableLayerBase;
}
+table ReshapeLayer {
+ base:LayerBase;
+ descriptor:ReshapeDescriptor;
+}
+
+table ReshapeDescriptor {
+ targetShape:[uint];
+}
+
union Layer {
AdditionLayer,
InputLayer,
MultiplicationLayer,
OutputLayer,
Pooling2dLayer,
+ ReshapeLayer,
SoftmaxLayer
}
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 57228c406e..b229ae7e3f 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -49,7 +49,6 @@ void SerializerVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBin
auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
flatBufferInputBaseLayer,
id);
-
// Push layer Guid to outputIds.
m_inputIds.push_back(GetSerializedId(layer->GetGuid()));
@@ -106,6 +105,31 @@ void SerializerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer,
CreateAnyLayer(flatBufferMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
}
+// Build FlatBuffer for Reshape Layer
+void SerializerVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
+ const armnn::ReshapeDescriptor& reshapeDescriptor,
+ const char* name)
+{
+ // Create FlatBuffer BaseLayer
+ auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
+
+ std::vector<unsigned int> targetShape;
+ for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
+ {
+ targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
+ }
+
+ auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
+ m_flatBufferBuilder.CreateVector(targetShape));
+
+ // Create the FlatBuffer ReshapeLayer
+ auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
+ flatBufferReshapeDesc);
+
+ // Add the AnyLayer to the FlatBufferLayers
+ CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
+}
+
// Build FlatBuffer for Softmax Layer
void SerializerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 169ed09cdd..e4485f5856 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -64,6 +64,10 @@ public:
const armnn::Pooling2dDescriptor& pooling2dDescriptor,
const char* name = nullptr) override;
+ void VisitReshapeLayer(const armnn::IConnectableLayer* layer,
+ const armnn::ReshapeDescriptor& reshapeDescriptor,
+ const char* name = nullptr) override;
+
private:
/// Creates the Input Slots and Output Slots and LayerBase for the layer.
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index a94e0ad151..c238543b22 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -8,6 +8,7 @@ The Arm NN SDK Serializer currently supports the following layers:
* Addition
* Multiplication
+* Reshape
* Softmax
* Pooling2d
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 4b6bf1ec53..77bf78683a 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -91,6 +91,67 @@ BOOST_AUTO_TEST_CASE(SimpleNetworkWithMultiplicationSerialization)
BOOST_TEST(stream.str().find(multLayerName) != stream.str().npos);
}
+BOOST_AUTO_TEST_CASE(SimpleReshapeIntegration)
+{
+ armnn::NetworkId networkIdentifier;
+ armnn::IRuntime::CreationOptions options; // default options
+ armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
+
+ unsigned int inputShape[] = {1, 9};
+ unsigned int outputShape[] = {3, 3};
+
+ auto inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::DataType::Float32);
+ auto outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
+ auto reshapeOutputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
+
+ armnn::ReshapeDescriptor reshapeDescriptor;
+ reshapeDescriptor.m_TargetShape = reshapeOutputTensorInfo.GetShape();
+
+ armnn::INetworkPtr network = armnn::INetwork::Create();
+ armnn::IConnectableLayer *const inputLayer = network->AddInputLayer(0);
+ armnn::IConnectableLayer *const reshapeLayer = network->AddReshapeLayer(reshapeDescriptor, "ReshapeLayer");
+ armnn::IConnectableLayer *const outputLayer = network->AddOutputLayer(0);
+
+ inputLayer->GetOutputSlot(0).Connect(reshapeLayer->GetInputSlot(0));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ reshapeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ armnnSerializer::Serializer serializer;
+ serializer.Serialize(*network);
+ std::stringstream stream;
+ serializer.SaveSerializedToStream(stream);
+ std::string const serializerString{stream.str()};
+
+ //Deserialize network.
+ auto deserializedNetwork = DeserializeNetwork(serializerString);
+
+ //Optimize the deserialized network
+ auto deserializedOptimized = Optimize(*deserializedNetwork, {armnn::Compute::CpuRef},
+ run->GetDeviceSpec());
+
+ // Load graph into runtime
+ run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
+
+ std::vector<float> input1Data(inputTensorInfo.GetNumElements());
+ std::iota(input1Data.begin(), input1Data.end(), 8);
+
+ armnn::InputTensors inputTensors
+ {
+ {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())}
+ };
+
+ std::vector<float> outputData(input1Data.size());
+ armnn::OutputTensors outputTensors
+ {
+ {0,armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+ };
+
+ run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+
+ BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(),outputData.end(), input1Data.begin(),input1Data.end());
+}
+
BOOST_AUTO_TEST_CASE(SimpleSoftmaxIntegration)
{
armnn::TensorInfo tensorInfo({1, 10}, armnn::DataType::Float32);