author     Mike Kelly <mike.kelly@arm.com>        2019-02-19 17:22:07 +0000
committer  mike.kelly <mike.kelly@arm.com>        2019-02-19 18:21:11 +0000
commit     a0766c3d461f6a635ef0c41c83c3ba868f2fd21a (patch)
tree       3565ace677fd6de80dc30476c00face7e885d46a
parent     2b183fb359774cbac5d628579ec2b4a7b6b41def (diff)
download   armnn-a0766c3d461f6a635ef0c41c83c3ba868f2fd21a.tar.gz
IVGCVSW-2646 Add Serializer & Deserializer for Conv2D
* Added Convolution2dLayer to Schema.fbs
* Added ConstTensorData serialization and deserialization helper functions
* Added Convolution2d serialization and deserialization support
* Added serialization and deserialization unit tests

Change-Id: Id376c08410ae01511972a2b0abdce9cfab907462
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
-rw-r--r--  CMakeLists.txt                                                |   1
-rw-r--r--  src/armnnDeserializeParser/DeserializeParser.cpp              | 102
-rw-r--r--  src/armnnDeserializeParser/DeserializeParser.hpp              |   2
-rw-r--r--  src/armnnDeserializeParser/test/DeserializeConvolution2d.cpp  | 142
-rw-r--r--  src/armnnSerializer/Schema.fbs                                |  22
-rw-r--r--  src/armnnSerializer/Serializer.cpp                            | 109
-rw-r--r--  src/armnnSerializer/Serializer.hpp                            |  13
-rw-r--r--  src/armnnSerializer/SerializerUtils.cpp                       |  17
-rw-r--r--  src/armnnSerializer/SerializerUtils.hpp                       |   2
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp                  |  81
10 files changed, 489 insertions, 2 deletions
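
For context, below is a minimal sketch of the serialize/deserialize round trip this change enables, condensed from the Conv2dSerialization test added to src/armnnSerializer/test/SerializerTests.cpp in this patch. The include paths and the free-standing RoundTripConv2d() helper are illustrative assumptions, not part of the change itself:

    // Minimal Conv2d serialize -> deserialize round trip (illustrative sketch).
    // Assumes the public ArmNN header plus the Serializer and IDeserializeParser
    // headers touched by this change; exact include paths may differ.
    #include <armnn/ArmNN.hpp>

    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    void RoundTripConv2d()   // hypothetical helper, for illustration only
    {
        // Describe a 3x3 convolution: stride 2, 1-pixel padding, NHWC, no bias
        armnn::Convolution2dDescriptor descriptor;
        descriptor.m_PadLeft     = 1;
        descriptor.m_PadRight    = 1;
        descriptor.m_PadTop      = 1;
        descriptor.m_PadBottom   = 1;
        descriptor.m_StrideX     = 2;
        descriptor.m_StrideY     = 2;
        descriptor.m_BiasEnabled = false;
        descriptor.m_DataLayout  = armnn::DataLayout::NHWC;

        armnn::TensorInfo inputInfo(armnn::TensorShape({1, 5, 5, 1}), armnn::DataType::Float32);
        armnn::TensorInfo outputInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32);
        armnn::TensorInfo weightsInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32);
        std::vector<float> weightsData{4, 5, 6, 0, 0, 0, 3, 2, 1};
        armnn::ConstTensor weights(weightsInfo, weightsData);

        // Build the network: input -> conv -> output
        armnn::INetworkPtr network = armnn::INetwork::Create();
        armnn::IConnectableLayer* input  = network->AddInputLayer(0, "input");
        armnn::IConnectableLayer* conv   = network->AddConvolution2dLayer(descriptor, weights, "conv");
        armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        conv->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        conv->GetOutputSlot(0).SetTensorInfo(outputInfo);

        // Serialize the network to a FlatBuffer byte stream
        armnnSerializer::Serializer serializer;
        serializer.Serialize(*network);
        std::stringstream stream;
        serializer.SaveSerializedToStream(stream);

        // Parse the bytes back using the new Convolution2d deserialization support
        std::string data = stream.str();
        std::vector<std::uint8_t> binary(data.begin(), data.end());
        auto parser = armnnDeserializeParser::IDeserializeParser::Create();
        armnn::INetworkPtr restored = parser->CreateNetworkFromBinary(binary);
    }
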
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 967ffb1144..68d87afb36 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -555,6 +555,7 @@ if(BUILD_UNIT_TESTS)
src/armnnSerializer/Schema_generated.h
src/armnnSerializer/test/SerializerTests.cpp
src/armnnDeserializeParser/test/DeserializeAdd.cpp
+ src/armnnDeserializeParser/test/DeserializeConvolution2d.cpp
src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
src/armnnDeserializeParser/test/DeserializePooling2d.cpp
src/armnnDeserializeParser/test/DeserializeReshape.cpp
diff --git a/src/armnnDeserializeParser/DeserializeParser.cpp b/src/armnnDeserializeParser/DeserializeParser.cpp
index 9af5087cff..0259f89db4 100644
--- a/src/armnnDeserializeParser/DeserializeParser.cpp
+++ b/src/armnnDeserializeParser/DeserializeParser.cpp
@@ -121,9 +121,23 @@ void CheckTensorPtr(DeserializeParser::TensorRawPtr rawPtr,
}
}
+void CheckConstTensorPtr(DeserializeParser::ConstTensorRawPtr rawPtr,
+ const CheckLocation& location)
+{
+ if (rawPtr == nullptr)
+ {
+ throw ParseException(boost::str(boost::format("%1% was called with a null const tensor pointer at %2%") %
+ location.m_Function %
+ location.FileLine()));
+ }
+}
+
#define CHECK_TENSOR_PTR(TENSOR_PTR) \
CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
+#define CHECK_CONST_TENSOR_PTR(TENSOR_PTR) \
+ CheckConstTensorPtr(TENSOR_PTR, CHECK_LOCATION())
+
#define CHECK_LAYERS(GRAPH, LAYERS_INDEX, LAYER_INDEX) \
CheckLayers(GRAPH, LAYERS_INDEX, LAYER_INDEX, CHECK_LOCATION())
@@ -157,6 +171,7 @@ m_ParserFunctions(Layer_MAX+1, &DeserializeParser::ParseUnsupportedLayer)
{
// register supported layers
m_ParserFunctions[Layer_AdditionLayer] = &DeserializeParser::ParseAdd;
+ m_ParserFunctions[Layer_Convolution2dLayer] = &DeserializeParser::ParseConvolution2d;
m_ParserFunctions[Layer_MultiplicationLayer] = &DeserializeParser::ParseMultiplication;
m_ParserFunctions[Layer_Pooling2dLayer] = &DeserializeParser::ParsePooling2d;
m_ParserFunctions[Layer_ReshapeLayer] = &DeserializeParser::ParseReshape;
@@ -171,6 +186,8 @@ DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPt
{
case Layer::Layer_AdditionLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base();
+ case Layer::Layer_Convolution2dLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_Convolution2dLayer()->base();
case Layer::Layer_InputLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->base();
case Layer::Layer_MultiplicationLayer:
@@ -206,6 +223,18 @@ int32_t DeserializeParser::GetBindingLayerInfo(const GraphPtr& graphPtr, unsigne
return 0;
}
+armnn::DataLayout ToDataLayout(armnn::armnnSerializer::DataLayout dataLayout)
+{
+ switch (dataLayout)
+ {
+ case armnn::armnnSerializer::DataLayout::DataLayout_NHWC:
+ return armnn::DataLayout::NHWC;
+ case armnn::armnnSerializer::DataLayout::DataLayout_NCHW:
+ default:
+ return armnn::DataLayout::NCHW;
+ }
+}
+
armnn::TensorInfo ToTensorInfo(DeserializeParser::TensorRawPtr tensorPtr)
{
armnn::DataType type;
@@ -216,6 +245,9 @@ armnn::TensorInfo ToTensorInfo(DeserializeParser::TensorRawPtr tensorPtr)
case DataType_QuantisedAsymm8:
type = armnn::DataType::QuantisedAsymm8;
break;
+ case DataType_Signed32:
+ type = armnn::DataType::Signed32;
+ break;
case DataType_Float32:
type = armnn::DataType::Float32;
break;
@@ -252,6 +284,33 @@ armnn::TensorInfo ToTensorInfo(DeserializeParser::TensorRawPtr tensorPtr)
return result;
}
+armnn::ConstTensor ToConstTensor(DeserializeParser::ConstTensorRawPtr constTensorPtr)
+{
+ CHECK_CONST_TENSOR_PTR(constTensorPtr);
+ armnn::TensorInfo tensorInfo = ToTensorInfo(constTensorPtr->info());
+
+ switch (constTensorPtr->data_type())
+ {
+ case ConstTensorData_ByteData:
+ return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_ByteData()->data()->data());
+ case ConstTensorData_ShortData:
+ return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_ShortData()->data()->data());
+ case ConstTensorData_IntData:
+ return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_IntData()->data()->data());
+ case ConstTensorData_LongData:
+ return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_LongData()->data()->data());
+ default:
+ {
+ CheckLocation location = CHECK_LOCATION();
+ throw ParseException(
+ boost::str(boost::format("Unsupported data type %1% = %2%. %3%") %
+ constTensorPtr->data_type() %
+ EnumNameConstTensorData(constTensorPtr->data_type()) %
+ location.AsString()));
+ }
+ }
+}
+
DeserializeParser::LayerBaseRawPtrVector DeserializeParser::GetGraphInputs(const GraphPtr& graphPtr)
{
@@ -603,6 +662,49 @@ void DeserializeParser::ParseAdd(unsigned int layerIndex)
RegisterOutputSlots(layerIndex, layer);
}
+void DeserializeParser::ParseConvolution2d(unsigned int layerIndex)
+{
+ CHECK_LAYERS(m_Graph, 0, layerIndex);
+ auto inputs = GetInputs(m_Graph, layerIndex);
+ CHECK_LOCATION();
+ CHECK_VALID_SIZE(inputs.size(), 1);
+
+ auto outputs = GetOutputs(m_Graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ auto layerName = boost::str(boost::format("Convolution2d:%1%") % layerIndex);
+
+ auto serializerLayer = m_Graph->layers()->Get(layerIndex)->layer_as_Convolution2dLayer();
+ auto serializerDescriptor = serializerLayer->descriptor();
+
+ armnn::Convolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = serializerDescriptor->padLeft();
+ descriptor.m_PadRight = serializerDescriptor->padRight();
+ descriptor.m_PadTop = serializerDescriptor->padTop();
+ descriptor.m_PadBottom = serializerDescriptor->padBottom();
+ descriptor.m_StrideX = serializerDescriptor->strideX();
+ descriptor.m_StrideY = serializerDescriptor->strideY();
+ descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();
+ descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
+
+ armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
+ armnn::ConstTensor biases;
+
+ if (descriptor.m_BiasEnabled)
+ {
+ biases = ToConstTensor(serializerLayer->biases());
+ }
+ IConnectableLayer* layer = m_Network->AddConvolution2dLayer(descriptor,
+ weights,
+ biases,
+ layerName.c_str());
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ RegisterInputSlots(layerIndex, layer);
+ RegisterOutputSlots(layerIndex, layer);
+}
+
void DeserializeParser::ParseMultiplication(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
diff --git a/src/armnnDeserializeParser/DeserializeParser.hpp b/src/armnnDeserializeParser/DeserializeParser.hpp
index aee647c636..c295ee8b5f 100644
--- a/src/armnnDeserializeParser/DeserializeParser.hpp
+++ b/src/armnnDeserializeParser/DeserializeParser.hpp
@@ -15,6 +15,7 @@ class DeserializeParser : public IDeserializeParser
{
public:
// Shorthands for deserializer types
+ using ConstTensorRawPtr = const armnn::armnnSerializer::ConstTensor *;
using GraphPtr = const armnn::armnnSerializer::SerializedGraph *;
using TensorRawPtr = const armnn::armnnSerializer::TensorInfo *;
using PoolingDescriptor = const armnn::armnnSerializer::Pooling2dDescriptor *;
@@ -68,6 +69,7 @@ private:
void ParseUnsupportedLayer(unsigned int layerIndex);
void ParseAdd(unsigned int layerIndex);
+ void ParseConvolution2d(unsigned int layerIndex);
void ParseMultiplication(unsigned int layerIndex);
void ParsePooling2d(unsigned int layerIndex);
void ParseReshape(unsigned int layerIndex);
diff --git a/src/armnnDeserializeParser/test/DeserializeConvolution2d.cpp b/src/armnnDeserializeParser/test/DeserializeConvolution2d.cpp
new file mode 100644
index 0000000000..f3f6feb7a1
--- /dev/null
+++ b/src/armnnDeserializeParser/test/DeserializeConvolution2d.cpp
@@ -0,0 +1,142 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../DeserializeParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(DeserializeParser)
+
+struct Convolution2dFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit Convolution2dFixture(const std::string & inputShape1,
+ const std::string & outputShape,
+ const std::string & weightsShape,
+ const std::string & dataType)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [{
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape1 + R"(,
+ dataType: )" + dataType + R"(,
+ quantizationScale: 0.5,
+ quantizationOffset: 0
+ },
+ }]
+ },
+ }
+ },
+ },
+ {
+ layer_type: "Convolution2dLayer",
+ layer : {
+ base: {
+ index:1,
+ layerName: "Convolution2dLayer",
+ layerType: "Convolution2d",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },
+ descriptor: {
+ padLeft: 1,
+ padRight: 1,
+ padTop: 1,
+ padBottom: 1,
+ strideX: 2,
+ strideY: 2,
+ biasEnabled: false,
+ dataLayout: NHWC
+ },
+ weights: {
+ info: {
+ dimensions: )" + weightsShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ data_type: IntData,
+ data: {
+ data: [
+ 1082130432, 1084227584, 1086324736,
+ 0, 0, 0,
+ 1077936128, 1073741824, 1065353216
+ ],
+ }
+ }
+ },
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 0,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }
+ }},
+ }]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct SimpleConvolution2dFixture : Convolution2dFixture
+{
+ SimpleConvolution2dFixture() : Convolution2dFixture("[ 1, 5, 5, 1 ]",
+ "[ 1, 3, 3, 1 ]",
+ "[ 1, 3, 3, 1 ]",
+ "Float32") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(Convolution2dFloat32, SimpleConvolution2dFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(
+ 0,
+ {{"InputLayer", {1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9, 2}}},
+ {{"OutputLayer", {23, 33, 24, 91, 99, 48, 26, 50, 19}}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/Schema.fbs b/src/armnnSerializer/Schema.fbs
index 2b96ad8de1..cbc7da066f 100644
--- a/src/armnnSerializer/Schema.fbs
+++ b/src/armnnSerializer/Schema.fbs
@@ -74,7 +74,8 @@ enum LayerType : uint {
Output = 3,
Pooling2d = 4,
Reshape = 5,
- Softmax = 6
+ Softmax = 6,
+ Convolution2d = 7
}
// Base layer table to be used as part of other layers
@@ -96,6 +97,24 @@ table AdditionLayer {
base:LayerBase;
}
+table Convolution2dLayer {
+ base:LayerBase;
+ descriptor:Convolution2dDescriptor;
+ weights:ConstTensor;
+ biases:ConstTensor;
+}
+
+table Convolution2dDescriptor {
+ padLeft:uint;
+ padRight:uint;
+ padTop:uint;
+ padBottom:uint;
+ strideX:uint;
+ strideY:uint;
+ biasEnabled:bool = false;
+ dataLayout:DataLayout = NCHW;
+}
+
table InputLayer {
base:BindableLayerBase;
}
@@ -164,6 +183,7 @@ table ReshapeDescriptor {
union Layer {
AdditionLayer,
+ Convolution2dLayer,
InputLayer,
MultiplicationLayer,
OutputLayer,
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index b229ae7e3f..f475be1015 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -91,6 +91,44 @@ void SerializerVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const
CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
}
+// Build FlatBuffer for Convolution2dLayer
+void SerializerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
+ const Convolution2dDescriptor& descriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ // Create FlatBuffer BaseLayer
+ auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
+
+ auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
+ descriptor.m_PadLeft,
+ descriptor.m_PadRight,
+ descriptor.m_PadTop,
+ descriptor.m_PadBottom,
+ descriptor.m_StrideX,
+ descriptor.m_StrideY,
+ descriptor.m_BiasEnabled,
+ GetFlatBufferDataLayout(descriptor.m_DataLayout));
+ auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
+ flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
+
+ if (biases.has_value())
+ {
+ flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
+ }
+
+ // Create the FlatBuffer Convolution2dLayer
+ auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
+ flatBufferBaseLayer,
+ flatBufferDescriptor,
+ flatBufferWeightsConstTensorInfo,
+ flatBufferBiasesConstTensorInfo);
+
+ // Add the AnyLayer to the FlatBufferLayers
+ CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
+}
+
// Build FlatBuffer for Multiplication Layer
void SerializerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer, const char* name)
{
@@ -200,9 +238,78 @@ void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset<void>& layer, c
m_serializedLayers.push_back(anyLayer);
}
+template <typename T>
+flatbuffers::Offset<flatbuffers::Vector<T>> SerializerVisitor::CreateDataVector(const void* memory, unsigned int size)
+{
+ const T* buffer = reinterpret_cast<const T*>(memory);
+ std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
+ auto fbVector = m_flatBufferBuilder.CreateVector(vector);
+ return fbVector;
+}
+
+flatbuffers::Offset<serializer::ConstTensor> SerializerVisitor::CreateConstTensorInfo(const ConstTensor& constTensor)
+{
+ TensorInfo tensorInfo = constTensor.GetInfo();
+
+ // Get the dimensions
+ std::vector<unsigned int> shape;
+
+ for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
+ {
+ shape.push_back(tensorInfo.GetShape()[dim]);
+ }
+
+ // Create FlatBuffer TensorInfo
+ auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
+ m_flatBufferBuilder.CreateVector(shape),
+ GetFlatBufferDataType(tensorInfo.GetDataType()),
+ tensorInfo.GetQuantizationScale(),
+ tensorInfo.GetQuantizationOffset());
+ flatbuffers::Offset<void> fbPayload;
+
+ switch (tensorInfo.GetDataType())
+ {
+ case DataType::Float32:
+ case DataType::Signed32:
+ {
+ auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
+ flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
+ m_flatBufferBuilder,
+ fbVector);
+ fbPayload = flatBuffersData.o;
+ break;
+ }
+ case DataType::Float16:
+ {
+ auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
+ flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
+ m_flatBufferBuilder,
+ fbVector);
+ fbPayload = flatBuffersData.o;
+ break;
+ }
+ case DataType::QuantisedAsymm8:
+ case DataType::Boolean:
+ default:
+ {
+ auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
+ flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
+ m_flatBufferBuilder,
+ fbVector);
+ fbPayload = flatBuffersData.o;
+ }
+ }
+ flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
+ m_flatBufferBuilder,
+ flatBufferTensorInfo,
+ GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
+ fbPayload);
+ return flatBufferConstTensor;
+}
+
std::vector<fb::Offset<serializer::InputSlot>> SerializerVisitor::CreateInputSlots(const IConnectableLayer* layer)
{
- std::vector<fb::Offset <serializer::InputSlot>> inputSlots;
+ std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
// Get the InputSlots
for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index e4485f5856..fd1a792fb0 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -45,6 +45,12 @@ public:
void VisitAdditionLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr) override;
+ void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
+ const armnn::Convolution2dDescriptor& descriptor,
+ const armnn::ConstTensor& weights,
+ const armnn::Optional<armnn::ConstTensor>& biases,
+ const char* name = nullptr) override;
+
void VisitInputLayer(const armnn::IConnectableLayer* layer,
armnn::LayerBindingId id,
const char* name = nullptr) override;
@@ -78,6 +84,13 @@ private:
/// Creates the serializer AnyLayer for the layer and adds it to m_serializedLayers.
void CreateAnyLayer(const flatbuffers::Offset<void>& layer, const armnn::armnnSerializer::Layer serializerLayer);
+ /// Creates the serializer ConstTensor for the armnn ConstTensor.
+ flatbuffers::Offset<armnn::armnnSerializer::ConstTensor> CreateConstTensorInfo(
+ const armnn::ConstTensor& constTensor);
+
+ template <typename T>
+ flatbuffers::Offset<flatbuffers::Vector<T>> CreateDataVector(const void* memory, unsigned int size);
+
///Function which maps Guid to an index
uint32_t GetSerializedId(unsigned int guid);
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 5772eab56c..2bad85e1a0 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -11,6 +11,23 @@ namespace armnnSerializer
using namespace armnn;
namespace serializer = armnn::armnnSerializer;
+serializer::ConstTensorData GetFlatBufferConstTensorData(DataType dataType)
+{
+ switch (dataType)
+ {
+ case DataType::Float32:
+ case DataType::Signed32:
+ return serializer::ConstTensorData::ConstTensorData_IntData;
+ case DataType::Float16:
+ return serializer::ConstTensorData::ConstTensorData_ShortData;
+ case DataType::QuantisedAsymm8:
+ case DataType::Boolean:
+ return serializer::ConstTensorData::ConstTensorData_ByteData;
+ default:
+ return serializer::ConstTensorData::ConstTensorData_NONE;
+ }
+}
+
serializer::DataType GetFlatBufferDataType(DataType dataType)
{
switch (dataType)
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
index 72a8806560..06f3076fd6 100644
--- a/src/armnnSerializer/SerializerUtils.hpp
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -11,6 +11,8 @@
namespace armnnSerializer
{
+armnn::armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType);
+
armnn::armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType);
armnn::armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 77bf78683a..31ef0455c3 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -65,6 +65,87 @@ BOOST_AUTO_TEST_CASE(SimpleNetworkSerialization)
BOOST_TEST(stream.str().length() > 0);
}
+BOOST_AUTO_TEST_CASE(Conv2dSerialization)
+{
+ armnn::IRuntime::CreationOptions options; // default options
+ armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
+
+ armnnDeserializeParser::IDeserializeParserPtr parser = armnnDeserializeParser::IDeserializeParser::Create();
+
+ armnn::TensorInfo inputInfo(armnn::TensorShape({1, 5, 5, 1}), armnn::DataType::Float32, 1.0f, 0);
+ armnn::TensorInfo outputInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32, 4.0f, 0);
+
+ armnn::TensorInfo weightsInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32, 2.0f, 0);
+
+ std::vector<float> weightsData({4, 5, 6, 0, 0, 0, 3, 2, 1});
+
+ // Construct network
+ armnn::INetworkPtr network = armnn::INetwork::Create();
+
+ armnn::Convolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 1;
+ descriptor.m_PadRight = 1;
+ descriptor.m_PadTop = 1;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 2;
+ descriptor.m_BiasEnabled = false;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ armnn::ConstTensor weights(weightsInfo, weightsData);
+
+ armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
+ armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(descriptor, weights, "conv");
+ armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0, "output");
+
+ inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+ convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ armnnSerializer::Serializer serializer;
+ serializer.Serialize(*network);
+
+ std::stringstream stream;
+ serializer.SaveSerializedToStream(stream);
+
+ std::string const serializerString{stream.str()};
+ std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
+
+ armnn::INetworkPtr deserializedNetwork = parser->CreateNetworkFromBinary(serializerVector);
+
+ auto deserializedOptimized = Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, run->GetDeviceSpec());
+
+ armnn::NetworkId networkIdentifier;
+
+ // Load graph into runtime
+ run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
+
+ std::vector<float> inputData
+ {
+ 1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9, 2
+ };
+ armnn::InputTensors inputTensors
+ {
+ {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+ };
+
+ std::vector<float> expectedOutputData
+ {
+ 23, 33, 24, 91, 99, 48, 26, 50, 19
+ };
+
+ std::vector<float> outputData(9);
+ armnn::OutputTensors outputTensors
+ {
+ {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+ };
+ run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+ BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(),
+ expectedOutputData.begin(), expectedOutputData.end());
+}
+
BOOST_AUTO_TEST_CASE(SimpleNetworkWithMultiplicationSerialization)
{
const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);