author    Jim Flynn <jim.flynn@arm.com>   2019-02-28 10:40:49 +0000
committer Jim Flynn <jim.flynn@arm.com>   2019-03-07 15:43:47 +0000
commit    ac25a1beda8da71a82c0cf2795e2a6eaaeaa26b1 (patch)
tree      2f186eac90aa72f224bd0eccaee3cffa83dea164
parent    d469faf863f4ecd3ba56f27e51884ef0dfeac7bf (diff)
download  armnn-ac25a1beda8da71a82c0cf2795e2a6eaaeaa26b1.tar.gz
IVGCVSW-2697 Add Serialize/Deserialize for the Merger Layer
* Force generation of schema header in every build
* Also fixed typo in OriginsDescriptor comment (Descriptors.hpp)
* Added Serialize/Deserialize check on Addition Layer
* Added Serialize/Deserialize check on Floor Layer
* Added Serialize/Deserialize check on Minimum Layer
* Added Serialize/Deserialize check on Maximum Layer
* Added Serialize/Deserialize check on Multiplication Layer
* Added Serialize/Deserialize check on Division Layer

Change-Id: I1358ea4db7ca506d8bcec2ee64e1fbad6005e723
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
-rw-r--r--  CMakeLists.txt                                   1
-rw-r--r--  include/armnn/Descriptors.hpp                    2
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp          42
-rw-r--r--  src/armnnDeserializer/Deserializer.hpp           1
-rw-r--r--  src/armnnDeserializer/DeserializerSupport.md     1
-rw-r--r--  src/armnnSerializer/ArmnnSchema.fbs             22
-rw-r--r--  src/armnnSerializer/Serializer.cpp              33
-rw-r--r--  src/armnnSerializer/Serializer.hpp               4
-rw-r--r--  src/armnnSerializer/SerializerSupport.md         1
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp   105
10 files changed, 201 insertions, 11 deletions
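Besides adding Merger support, this patch extends several existing serializer tests (Addition, Floor, Minimum, Maximum, Multiplication, Division) with a full serialize/deserialize round-trip check. A condensed sketch of that pattern follows; it assumes the test helpers SerializeNetwork, DeserializeNetwork and CheckDeserializedNetworkAgainstOriginal defined in SerializerTests.cpp further down, and a network/info pair built by the individual test case:

// Hedged sketch of the round-trip check added to the binary-operation tests below.
// `network` is the armnn::INetworkPtr built by the test; `info` is the
// armnn::TensorInfo shared by its inputs and output.
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
BOOST_CHECK(deserializedNetwork);

// Compare the deserialized graph's behaviour against the original; the
// trailing argument lists the input ids the helper feeds.
CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
                                               *network,
                                               {info.GetShape(), info.GetShape()}, // input shapes
                                               {info.GetShape()},                  // output shape
                                               {0, 1});                            // input ids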
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1dff39f24c..003c9dfc0b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -150,6 +150,7 @@ if(BUILD_TF_LITE_PARSER)
endif()
if(BUILD_ARMNN_SERIALIZER)
+ file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/src/armnnSerializer/ArmnnSchema_generated.h)
add_custom_command (
OUTPUT src/armnnSerializer/ArmnnSchema_generated.h
COMMAND ${FLATC_DIR}/flatc -o ${CMAKE_CURRENT_BINARY_DIR}/src/armnnSerializer --cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index dc09cc60e8..2cf09746f0 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -69,7 +69,7 @@ struct OriginsDescriptor
/// @Brief Set the view origin coordinates. The arguments are: view, dimension, value.
/// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
- /// If the coord is greater than or equal to GetNumViews(), then the coord argument is out of range.
+ /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
/// Get the number of views.
uint32_t GetNumViews() const;
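For illustration only (this snippet is not part of the patch), the bounds described in the corrected comment translate to calls like the following, assuming a descriptor with two views and four dimensions:

// view must be < GetNumViews() and coord must be < GetNumDimensions().
armnn::OriginsDescriptor desc(2, 4);
armnn::Status inRange    = desc.SetViewOriginCoord(1, 3, 0); // view 1 < 2, coord 3 < 4: valid
armnn::Status outOfRange = desc.SetViewOriginCoord(1, 4, 0); // coord 4 >= GetNumDimensions(): reported via the returned Status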
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index ed110ad750..d62751d640 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -201,6 +201,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
m_ParserFunctions[Layer_MaximumLayer] = &Deserializer::ParseMaximum;
m_ParserFunctions[Layer_MeanLayer] = &Deserializer::ParseMean;
m_ParserFunctions[Layer_MinimumLayer] = &Deserializer::ParseMinimum;
+ m_ParserFunctions[Layer_MergerLayer] = &Deserializer::ParseMerger;
m_ParserFunctions[Layer_MultiplicationLayer] = &Deserializer::ParseMultiplication;
m_ParserFunctions[Layer_NormalizationLayer] = &Deserializer::ParseNormalization;
m_ParserFunctions[Layer_PadLayer] = &Deserializer::ParsePad;
@@ -255,6 +256,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
return graphPtr->layers()->Get(layerIndex)->layer_as_MinimumLayer()->base();
case Layer::Layer_MaximumLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_MaximumLayer()->base();
+ case Layer::Layer_MergerLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_MergerLayer()->base();
case Layer::Layer_MultiplicationLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_MultiplicationLayer()->base();
case Layer::Layer_NormalizationLayer:
@@ -1111,6 +1114,45 @@ void Deserializer::ParseMaximum(GraphPtr graph, unsigned int layerIndex)
RegisterOutputSlots(graph, layerIndex, layer);
}
+void Deserializer::ParseMerger(GraphPtr graph, unsigned int layerIndex)
+{
+ CHECK_LAYERS(graph, 0, layerIndex);
+ CHECK_LOCATION();
+
+ auto outputs = GetOutputs(graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ auto mergerLayer = graph->layers()->Get(layerIndex)->layer_as_MergerLayer();
+ auto layerName = GetLayerName(graph, layerIndex);
+ auto mergerDescriptor = mergerLayer->descriptor();
+ unsigned int numViews = mergerDescriptor->numViews();
+ unsigned int numDimensions = mergerDescriptor->numDimensions();
+
+ // can now check the number of inputs == number of views
+ auto inputs = GetInputs(graph, layerIndex);
+ CHECK_VALID_SIZE(inputs.size(), numViews);
+
+ armnn::OriginsDescriptor descriptor(numViews, numDimensions);
+ auto originsPtr = mergerDescriptor->viewOrigins();
+ for (unsigned int v = 0; v < numViews; ++v)
+ {
+ auto originPtr = originsPtr->Get(v);
+ for (unsigned int d = 0; d < numDimensions; ++d)
+ {
+ uint32_t value = originPtr->data()->Get(d);
+ descriptor.SetViewOriginCoord(v, d, value);
+ }
+ }
+ descriptor.SetConcatAxis(mergerDescriptor->concatAxis());
+
+ IConnectableLayer* layer = m_Network->AddMergerLayer(descriptor, layerName.c_str());
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ RegisterInputSlots(graph, layerIndex, layer);
+ RegisterOutputSlots(graph, layerIndex, layer);
+}
+
void Deserializer::ParseMultiplication(GraphPtr graph, unsigned int layerIndex)
{
CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index e837a08aa3..d0859469f6 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -89,6 +89,7 @@ private:
void ParseMaximum(GraphPtr graph, unsigned int layerIndex);
void ParseMean(GraphPtr graph, unsigned int layerIndex);
void ParseMinimum(GraphPtr graph, unsigned int layerIndex);
+ void ParseMerger(GraphPtr graph, unsigned int layerIndex);
void ParseMultiplication(GraphPtr graph, unsigned int layerIndex);
void ParseNormalization(GraphPtr graph, unsigned int layerIndex);
void ParsePad(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index 0f3d91d1e7..3762134b32 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -21,6 +21,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
* Greater
* Maximum
* Mean
+* Merger
* Minimum
* Multiplication
* Normalization
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 01142ff391..a5fb4b6697 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -110,7 +110,8 @@ enum LayerType : uint {
Subtraction = 26,
StridedSlice = 27,
Gather = 28,
- Mean = 29
+ Mean = 29,
+ Merger = 30
}
// Base layer table to be used as part of other layers
@@ -415,6 +416,22 @@ table StridedSliceDescriptor {
dataLayout:DataLayout;
}
+table MergerLayer {
+ base:LayerBase;
+ descriptor:OriginsDescriptor;
+}
+
+table UintVector {
+ data:[uint];
+}
+
+table OriginsDescriptor {
+ concatAxis:uint;
+ numViews:uint;
+ numDimensions:uint;
+ viewOrigins:[UintVector];
+}
+
union Layer {
ActivationLayer,
AdditionLayer,
@@ -445,7 +462,8 @@ union Layer {
SubtractionLayer,
StridedSliceLayer,
GatherLayer,
- MeanLayer
+ MeanLayer,
+ MergerLayer
}
table AnyLayer {
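To make the new OriginsDescriptor table concrete, the following sketch (illustrative, not part of the patch) builds the armnn::OriginsDescriptor that the serializer below would flatten into it when two [2,3,2,2] tensors are concatenated along axis 0, matching the SerializeDeserializeMerger test at the end of this change:

// Two views, four dimensions, concatenating along axis 0.
armnn::OriginsDescriptor descriptor(2, 4);
descriptor.SetConcatAxis(0);

// View 0 starts at the origin of the output tensor: [0, 0, 0, 0].
for (uint32_t d = 0; d < 4; ++d)
{
    descriptor.SetViewOriginCoord(0, d, 0);
}

// View 1 is offset by the first input's extent (2) along dimension 0: [2, 0, 0, 0].
descriptor.SetViewOriginCoord(1, 0, 2);
descriptor.SetViewOriginCoord(1, 1, 0);
descriptor.SetViewOriginCoord(1, 2, 0);
descriptor.SetViewOriginCoord(1, 3, 0);

// viewOrigins would then be written as [[0,0,0,0], [2,0,0,0]].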
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 56c4281a87..3b71e5fc7d 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -361,6 +361,39 @@ void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer,
CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
}
+void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer,
+ const armnn::OriginsDescriptor& mergerDescriptor,
+ const char* name)
+{
+ auto flatBufferMergerBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merger);
+
+ std::vector<flatbuffers::Offset<UintVector>> views;
+ for (unsigned int v = 0; v < mergerDescriptor.GetNumViews(); ++v)
+ {
+ const uint32_t* origin = mergerDescriptor.GetViewOrigin(v);
+ std::vector<uint32_t> origins;
+ for (unsigned int d = 0; d < mergerDescriptor.GetNumDimensions(); ++d)
+ {
+ origins.push_back(origin[d]);
+ }
+ auto view = m_flatBufferBuilder.CreateVector(origins);
+ auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
+ views.push_back(uintVector);
+ }
+
+ auto flatBufferMergerDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
+ mergerDescriptor.GetConcatAxis(),
+ mergerDescriptor.GetNumViews(),
+ mergerDescriptor.GetNumDimensions(),
+ m_flatBufferBuilder.CreateVector(views));
+
+ auto flatBufferLayer = CreateMergerLayer(m_flatBufferBuilder,
+ flatBufferMergerBaseLayer,
+ flatBufferMergerDescriptor);
+
+ CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_MergerLayer);
+}
+
void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
{
auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index f928c37d30..e93e4cead6 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -112,6 +112,10 @@ public:
void VisitMaximumLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr) override;
+ void VisitMergerLayer(const armnn::IConnectableLayer* layer,
+ const armnn::OriginsDescriptor& mergerDescriptor,
+ const char* name = nullptr) override;
+
void VisitMultiplicationLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr) override;
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index bb50242e90..ae8691ed04 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -21,6 +21,7 @@ The Arm NN SDK Serializer currently supports the following layers:
* Greater
* Maximum
* Mean
+* Merger
* Minimum
* Multiplication
* Normalization
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index a18ae32a03..5a054c210c 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -148,7 +148,7 @@ void CheckDeserializedNetworkAgainstOriginal(const armnn::INetwork& deserialized
BOOST_AUTO_TEST_SUITE(SerializerTests)
-BOOST_AUTO_TEST_CASE(SerializeAddition)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeAddition)
{
class VerifyAdditionName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -188,6 +188,12 @@ BOOST_AUTO_TEST_CASE(SerializeAddition)
VerifyAdditionName nameChecker;
deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+ *network,
+ {info.GetShape(), info.GetShape()},
+ {info.GetShape()},
+ {0, 1});
}
BOOST_AUTO_TEST_CASE(SerializeConstant)
@@ -255,7 +261,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeConstant)
{commonTensorInfo.GetShape()});
}
-BOOST_AUTO_TEST_CASE(SerializeFloor)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeFloor)
{
class VerifyFloorName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -269,7 +275,7 @@ BOOST_AUTO_TEST_CASE(SerializeFloor)
const armnn::TensorInfo info({4,4}, armnn::DataType::Float32);
armnn::INetworkPtr network = armnn::INetwork::Create();
- armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(1);
+ armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
const char* floorLayerName = "floor";
@@ -295,9 +301,14 @@ BOOST_AUTO_TEST_CASE(SerializeFloor)
VerifyFloorName nameChecker;
deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+ *network,
+ {info.GetShape()},
+ {info.GetShape()});
}
-BOOST_AUTO_TEST_CASE(SerializeMinimum)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMinimum)
{
class VerifyMinimumName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -346,9 +357,15 @@ BOOST_AUTO_TEST_CASE(SerializeMinimum)
VerifyMinimumName nameChecker(minimumLayerName);
deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+ *network,
+ {info.GetShape(), info.GetShape()},
+ {info.GetShape()},
+ {0, 1});
}
-BOOST_AUTO_TEST_CASE(SerializeMaximum)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMaximum)
{
class VerifyMaximumName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -397,9 +414,15 @@ BOOST_AUTO_TEST_CASE(SerializeMaximum)
VerifyMaximumName nameChecker(maximumLayerName);
deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+ *network,
+ {info.GetShape(), info.GetShape()},
+ {info.GetShape()},
+ {0, 1});
}
-BOOST_AUTO_TEST_CASE(SerializeMultiplication)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMultiplication)
{
class VerifyMultiplicationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -442,6 +465,12 @@ BOOST_AUTO_TEST_CASE(SerializeMultiplication)
VerifyMultiplicationName nameChecker;
deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+ *network,
+ {info.GetShape(), info.GetShape()},
+ {info.GetShape()},
+ {0, 1});
}
BOOST_AUTO_TEST_CASE(SerializeDeserializeConvolution2d)
@@ -998,7 +1027,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeBatchNormalization)
{outputInfo.GetShape()});
}
-BOOST_AUTO_TEST_CASE(SerializeDivision)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeDivision)
{
class VerifyDivisionName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -1041,6 +1070,12 @@ BOOST_AUTO_TEST_CASE(SerializeDivision)
VerifyDivisionName nameChecker;
deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+ *network,
+ {info.GetShape(), info.GetShape()},
+ {info.GetShape()},
+ {0, 1});
}
BOOST_AUTO_TEST_CASE(SerializeDeserializeNormalization)
@@ -1173,7 +1208,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializePad)
{outputTensorInfo.GetShape()});
}
-BOOST_AUTO_TEST_CASE(SerializeRsqrt)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeRsqrt)
{
class VerifyRsqrtName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -1374,4 +1409,58 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeMean)
{outputTensorInfo.GetShape()});
}
+BOOST_AUTO_TEST_CASE(SerializeDeserializeMerger)
+{
+ class VerifyMergerName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+ {
+ public:
+ void VisitMergerLayer(const armnn::IConnectableLayer* layer,
+ const armnn::OriginsDescriptor& mergerDescriptor,
+ const char* name = nullptr) override
+ {
+ BOOST_TEST(name == "MergerLayer");
+ }
+ };
+
+ unsigned int inputShapeOne[] = {2, 3, 2, 2};
+ unsigned int inputShapeTwo[] = {2, 3, 2, 2};
+ unsigned int outputShape[] = {4, 3, 2, 2};
+
+ const armnn::TensorInfo inputOneTensorInfo = armnn::TensorInfo(4, inputShapeOne, armnn::DataType::Float32);
+ const armnn::TensorInfo inputTwoTensorInfo = armnn::TensorInfo(4, inputShapeTwo, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+ std::vector<armnn::TensorShape> shapes;
+ shapes.push_back(inputOneTensorInfo.GetShape());
+ shapes.push_back(inputTwoTensorInfo.GetShape());
+
+ armnn::MergerDescriptor descriptor =
+ armnn::CreateMergerDescriptorForConcatenation(shapes.begin(), shapes.end(), 0);
+
+ armnn::INetworkPtr network = armnn::INetwork::Create();
+ armnn::IConnectableLayer* const inputLayerOne = network->AddInputLayer(0);
+ inputLayerOne->GetOutputSlot(0).SetTensorInfo(inputOneTensorInfo);
+ armnn::IConnectableLayer* const inputLayerTwo = network->AddInputLayer(1);
+ inputLayerTwo->GetOutputSlot(0).SetTensorInfo(inputTwoTensorInfo);
+ armnn::IConnectableLayer* const mergerLayer = network->AddMergerLayer(descriptor, "MergerLayer");
+ mergerLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+ armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+ inputLayerOne->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
+ inputLayerTwo->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
+ mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+ BOOST_CHECK(deserializedNetwork);
+
+ VerifyMergerName nameChecker;
+ deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal<float>(*deserializedNetwork,
+ *network,
+ {inputOneTensorInfo.GetShape(), inputTwoTensorInfo.GetShape()},
+ {outputTensorInfo.GetShape()},
+ {0, 1});
+}
+
BOOST_AUTO_TEST_SUITE_END()