aboutsummaryrefslogtreecommitdiff
path: root/src/armnnDeserializer
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnnDeserializer')
-rw-r--r--src/armnnDeserializer/Deserializer.cpp984
-rw-r--r--src/armnnDeserializer/Deserializer.hpp110
-rw-r--r--src/armnnDeserializer/DeserializerSupport.md18
-rw-r--r--src/armnnDeserializer/README.md7
-rw-r--r--src/armnnDeserializer/test/DeserializeAdd.cpp161
-rw-r--r--src/armnnDeserializer/test/DeserializeConvolution2d.cpp142
-rw-r--r--src/armnnDeserializer/test/DeserializeMultiplication.cpp161
-rw-r--r--src/armnnDeserializer/test/DeserializePooling2d.cpp162
-rw-r--r--src/armnnDeserializer/test/DeserializeReshape.cpp128
-rw-r--r--src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp199
-rw-r--r--src/armnnDeserializer/test/SchemaSerialize.hpp9
-rw-r--r--src/armnnDeserializer/test/SchemaSerialize.s13
12 files changed, 2094 insertions, 0 deletions
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
new file mode 100644
index 0000000000..56a6570eee
--- /dev/null
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -0,0 +1,984 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Deserializer.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Exceptions.hpp>
+
+#include <ParserHelper.hpp>
+#include <Permute.hpp>
+#include <VerificationHelpers.hpp>
+
+#include <boost/filesystem.hpp>
+#include <boost/format.hpp>
+#include <boost/core/ignore_unused.hpp>
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+#include <boost/log/trivial.hpp>
+
+// The generated code based on the Serialize schema:
+#include <Schema_generated.h>
+
+#include <fstream>
+#include <algorithm>
+#include <limits>
+#include <numeric>
+
+using armnn::ParseException;
+using namespace armnn;
+using namespace armnnSerializer;
+
+namespace armnnDeserializer
+{
+
+namespace
+{
+
+// Sentinel accepted by CheckLayers as an always-valid layer index.
+const uint32_t VIRTUAL_LAYER_ID = std::numeric_limits<uint32_t>::max();
+
+// Validates that the graph has been loaded (layers vector present) and that
+// 'layersIndex' refers to an existing entry; throws ParseException otherwise.
+void CheckGraph(const Deserializer::GraphPtr& graph,
+                unsigned int layersIndex,
+                const CheckLocation& location)
+{
+    // A graph without a layers vector has not been loaded/Unpack(ed) yet.
+    if (graph->layers() == nullptr)
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with invalid (null) graph. "
+                              "Possible reason is that the graph is not yet loaded and Unpack(ed). "
+                              "layers:%2% at %3%") %
+                location.m_Function %
+                layersIndex %
+                location.FileLine()));
+    }
+
+    if (layersIndex >= graph->layers()->size())
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with an invalid layers index. "
+                              "layers:%2% at %3%") %
+                location.m_Function %
+                layersIndex %
+                location.FileLine()));
+    }
+}
+
+// Validates that 'graph' is loaded, that 'layersIndex' is a valid index and
+// that 'layerIndex' names an existing layer (or is the VIRTUAL_LAYER_ID
+// sentinel). Throws ParseException on any violation.
+void CheckLayers(const Deserializer::GraphPtr& graph,
+                 unsigned int layersIndex,
+                 unsigned int layerIndex,
+                 const CheckLocation& location)
+{
+    if (graph->layers() == nullptr)
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with invalid (null) graph. "
+                              "Possible reason is that the graph is not yet loaded and Unpack(ed). "
+                              "layers:%2% at %3%") %
+                location.m_Function %
+                layersIndex %
+                location.FileLine()));
+    }
+    else if (layersIndex >= graph->layers()->size())
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with an invalid layers index. "
+                              "layers:%2% at %3%") %
+                location.m_Function %
+                layersIndex %
+                location.FileLine()));
+    }
+    // BUGFIX: the original compared against graph->layers()[layersIndex].size(),
+    // which performs pointer arithmetic on the flatbuffers::Vector* itself and
+    // is only accidentally correct for layersIndex == 0 (UB for any other
+    // value). The layer index must be checked against the vector's own size.
+    else if (layerIndex >= graph->layers()->size()
+        && layerIndex != VIRTUAL_LAYER_ID)
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with an invalid layer index. "
+                              "layers:%2% layer:%3% at %4%") %
+                location.m_Function %
+                layersIndex %
+                layerIndex %
+                location.FileLine()));
+    }
+}
+
+// Throws ParseException if the given tensor pointer is null.
+void CheckTensorPtr(Deserializer::TensorRawPtr rawPtr,
+                    const CheckLocation& location)
+{
+    if (rawPtr != nullptr)
+    {
+        return;
+    }
+    throw ParseException(
+        boost::str(
+            boost::format("%1% was called with a null tensor pointer. "
+                          "at %2%") %
+            location.m_Function %
+            location.FileLine()));
+}
+
+// Throws ParseException if the given const tensor pointer is null.
+void CheckConstTensorPtr(Deserializer::ConstTensorRawPtr rawPtr,
+                         const CheckLocation& location)
+{
+    if (rawPtr != nullptr)
+    {
+        return;
+    }
+    throw ParseException(boost::str(boost::format("%1% was called with a null const tensor pointer. at %2%") %
+                                    location.m_Function %
+                                    location.FileLine()));
+}
+
+// Convenience wrappers: each check captures its call site via CHECK_LOCATION()
+// so the thrown ParseException reports where the check was invoked.
+#define CHECK_TENSOR_PTR(TENSOR_PTR) \
+    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
+
+#define CHECK_CONST_TENSOR_PTR(TENSOR_PTR) \
+    CheckConstTensorPtr(TENSOR_PTR, CHECK_LOCATION())
+
+#define CHECK_LAYERS(GRAPH, LAYERS_INDEX, LAYER_INDEX) \
+    CheckLayers(GRAPH, LAYERS_INDEX, LAYER_INDEX, CHECK_LOCATION())
+
+#define CHECK_GRAPH(GRAPH, LAYERS_INDEX) \
+    CheckGraph(GRAPH, LAYERS_INDEX, CHECK_LOCATION())
+} // anonymous namespace
+
+// Returns true when the runtime TensorShape matches the serialized dimensions
+// element-for-element (and has the same rank).
+bool CheckShape(const armnn::TensorShape& actual, const std::vector<uint32_t>& expected)
+{
+    if (actual.GetNumDimensions() != expected.size())
+    {
+        return false;
+    }
+
+    unsigned int dim = 0u;
+    return std::all_of(expected.begin(), expected.end(),
+                       [&actual, &dim](uint32_t expectedDim)
+                       {
+                           return actual[dim++] == static_cast<unsigned int>(expectedDim);
+                       });
+}
+
+// Constructs the parser with an empty network and a dispatch table that maps
+// every serialized layer type to ParseUnsupportedLayer by default; supported
+// layer types are then overridden with their specific handler below.
+Deserializer::Deserializer()
+: m_Network(nullptr, nullptr),
+//May require LayerType_Max to be included
+m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
+{
+    // register supported layers
+    m_ParserFunctions[Layer_AdditionLayer] = &Deserializer::ParseAdd;
+    m_ParserFunctions[Layer_Convolution2dLayer] = &Deserializer::ParseConvolution2d;
+    m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d;
+    m_ParserFunctions[Layer_MultiplicationLayer] = &Deserializer::ParseMultiplication;
+    m_ParserFunctions[Layer_Pooling2dLayer] = &Deserializer::ParsePooling2d;
+    m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape;
+    m_ParserFunctions[Layer_SoftmaxLayer] = &Deserializer::ParseSoftmax;
+}
+
+// Returns the common LayerBase of the layer at 'layerIndex' so generic
+// attributes (name, index, slots) can be read without knowing the concrete
+// layer type. Throws ParseException for Layer_NONE or unknown types.
+Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
+{
+    auto layerType = graphPtr->layers()->Get(layerIndex)->layer_type();
+
+    switch(layerType)
+    {
+        case Layer::Layer_AdditionLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base();
+        case Layer::Layer_Convolution2dLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_Convolution2dLayer()->base();
+        case Layer::Layer_DepthwiseConvolution2dLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_DepthwiseConvolution2dLayer()->base();
+        case Layer::Layer_InputLayer:
+            // Input/Output layers wrap a bindable base, hence the extra ->base()
+            return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->base();
+        case Layer::Layer_MultiplicationLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_MultiplicationLayer()->base();
+        case Layer::Layer_OutputLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
+        case Layer::Layer_Pooling2dLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->base();
+        case Layer::Layer_ReshapeLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
+        case Layer::Layer_SoftmaxLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_SoftmaxLayer()->base();
+        case Layer::Layer_NONE:
+        default:
+            throw ParseException(boost::str(
+                boost::format("Layer must have a type %1%") %
+                Layer::Layer_NONE));
+    }
+}
+
+// Returns the layer binding id for input/output layers, and 0 for every
+// other layer type (which has no binding).
+int32_t Deserializer::GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex)
+{
+    const auto* anyLayer = graphPtr->layers()->Get(layerIndex);
+
+    switch (anyLayer->layer_type())
+    {
+        case Layer::Layer_InputLayer:
+            return anyLayer->layer_as_InputLayer()->base()->layerBindingId();
+        case Layer::Layer_OutputLayer:
+            return anyLayer->layer_as_OutputLayer()->base()->layerBindingId();
+        default:
+            return 0;
+    }
+}
+
+// Maps the serialized data layout onto the armnn equivalent. Anything other
+// than NHWC (including unknown values) maps to NCHW, matching the original
+// switch's default branch.
+armnn::DataLayout ToDataLayout(armnnSerializer::DataLayout dataLayout)
+{
+    if (dataLayout == armnnSerializer::DataLayout::DataLayout_NHWC)
+    {
+        return armnn::DataLayout::NHWC;
+    }
+    return armnn::DataLayout::NCHW;
+}
+
+// Converts a serialized TensorInfo into an armnn::TensorInfo, translating the
+// data type and copying dimensions and quantization parameters.
+// Throws ParseException for a null pointer or an unsupported data type.
+armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
+{
+    armnn::DataType type;
+    CHECK_TENSOR_PTR(tensorPtr);
+
+    switch (tensorPtr->dataType())
+    {
+        case DataType_QuantisedAsymm8:
+            type = armnn::DataType::QuantisedAsymm8;
+            break;
+        case DataType_Signed32:
+            type = armnn::DataType::Signed32;
+            break;
+        case DataType_Float32:
+            type = armnn::DataType::Float32;
+            break;
+        case DataType_Float16:
+            type = armnn::DataType::Float16;
+            break;
+        case DataType_Boolean:
+            type = armnn::DataType::Boolean;
+            break;
+        default:
+        {
+            CheckLocation location = CHECK_LOCATION();
+            throw ParseException(
+                boost::str(
+                    boost::format("Unsupported data type %1% = %2%. %3%") %
+                    tensorPtr->dataType() %
+                    EnumNameDataType(tensorPtr->dataType()) %
+                    location.AsString()));
+        }
+    }
+    // Quantization parameters are carried even for non-quantized types.
+    float quantizationScale = tensorPtr->quantizationScale();
+    int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
+    auto dimensions = tensorPtr->dimensions();
+    unsigned int size = dimensions->size();
+    std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
+
+    // two statements (on purpose) for easier debugging:
+    armnn::TensorInfo result(size,
+                             outputDims.data(),
+                             type,
+                             quantizationScale,
+                             quantizationOffset);
+    return result;
+}
+
+// Converts a serialized ConstTensor into an armnn::ConstTensor. The returned
+// tensor aliases the flatbuffer's data buffer (no copy is made), so the
+// serialized graph must outlive it. Throws for null input or unknown data type.
+armnn::ConstTensor ToConstTensor(Deserializer::ConstTensorRawPtr constTensorPtr)
+{
+    CHECK_CONST_TENSOR_PTR(constTensorPtr);
+    armnn::TensorInfo tensorInfo = ToTensorInfo(constTensorPtr->info());
+
+    switch (constTensorPtr->data_type())
+    {
+        case ConstTensorData_ByteData:
+            return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_ByteData()->data()->data());
+        case ConstTensorData_ShortData:
+            return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_ShortData()->data()->data());
+        case ConstTensorData_IntData:
+            return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_IntData()->data()->data());
+        case ConstTensorData_LongData:
+            return armnn::ConstTensor(tensorInfo, constTensorPtr->data_as_LongData()->data()->data());
+        default:
+        {
+            CheckLocation location = CHECK_LOCATION();
+            throw ParseException(
+                boost::str(boost::format("Unsupported data type %1% = %2%. %3%") %
+                           constTensorPtr->data_type() %
+                           EnumNameConstTensorData(constTensorPtr->data_type()) %
+                           location.AsString()));
+        }
+    }
+}
+
+// Collects the LayerBase of every graph input, in inputIds order.
+Deserializer::LayerBaseRawPtrVector Deserializer::GetGraphInputs(const GraphPtr& graphPtr)
+{
+    CHECK_GRAPH(graphPtr, 0);
+    const unsigned int inputCount = graphPtr->inputIds()->size();
+
+    LayerBaseRawPtrVector inputLayers;
+    inputLayers.reserve(inputCount);
+
+    for (unsigned int idx = 0; idx < inputCount; ++idx)
+    {
+        const uint32_t layerId = graphPtr->inputIds()->Get(idx);
+        inputLayers.push_back(GetBaseLayer(graphPtr, layerId));
+    }
+    return inputLayers;
+}
+
+// Collects the LayerBase of every graph output, in outputIds order.
+Deserializer::LayerBaseRawPtrVector Deserializer::GetGraphOutputs(const GraphPtr& graphPtr)
+{
+    CHECK_GRAPH(graphPtr, 0);
+    const unsigned int outputCount = graphPtr->outputIds()->size();
+
+    LayerBaseRawPtrVector outputLayers;
+    outputLayers.reserve(outputCount);
+
+    for (unsigned int idx = 0; idx < outputCount; ++idx)
+    {
+        const uint32_t layerId = graphPtr->outputIds()->Get(idx);
+        outputLayers.push_back(GetBaseLayer(graphPtr, layerId));
+    }
+    return outputLayers;
+}
+
+// Returns, for each input slot of the layer, the TensorInfo of the tensor
+// feeding it (read from the producing layer's output slot).
+Deserializer::TensorRawPtrVector Deserializer::GetInputs(const GraphPtr& graphPtr,
+                                                         unsigned int layerIndex)
+{
+    CHECK_LAYERS(graphPtr, 0, layerIndex);
+    auto layer = GetBaseLayer(graphPtr, layerIndex);
+    const auto& numInputs = layer->inputSlots()->size();
+
+    TensorRawPtrVector result(numInputs);
+
+    for (unsigned int i=0; i<numInputs; ++i)
+    {
+        auto inputId = CHECKED_NON_NEGATIVE(static_cast<int32_t>
+                (layer->inputSlots()->Get(i)->connection()->sourceLayerIndex()));
+        // NOTE(review): the producer's output slot 0 is assumed here; confirm
+        // whether a connection can reference any other slot in the schema.
+        result[i] = GetBaseLayer(graphPtr, inputId)->outputSlots()->Get(0)->tensorInfo();
+    }
+    return result;
+}
+
+// Returns the TensorInfo of every output slot of the layer, in slot order.
+Deserializer::TensorRawPtrVector Deserializer::GetOutputs(const GraphPtr& graphPtr,
+                                                          unsigned int layerIndex)
+{
+    CHECK_LAYERS(graphPtr, 0, layerIndex);
+    auto layer = GetBaseLayer(graphPtr, layerIndex);
+    const auto& numOutputs = layer->outputSlots()->size();
+
+    TensorRawPtrVector result(numOutputs);
+
+    for (unsigned int i=0; i<numOutputs; ++i)
+    {
+        result[i] = layer->outputSlots()->Get(i)->tensorInfo();
+    }
+    return result;
+}
+
+// Default entry in m_ParserFunctions: reached when a serialized layer type has
+// no registered handler. Always throws, naming the offending layer.
+void Deserializer::ParseUnsupportedLayer(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    const auto layerName = GetBaseLayer(m_Graph, layerIndex)->layerName()->c_str();
+    throw ParseException(
+        boost::str(
+            boost::format("Layer not supported. "
+                          "layerIndex: %1% "
+                          "layerName: %2% / %3%") %
+            layerIndex %
+            layerName %
+            CHECK_LOCATION().AsString()));
+}
+
+// Clears any previously loaded network and graph so the parser can be reused.
+void Deserializer::ResetParser()
+{
+    m_Network = armnn::INetworkPtr(nullptr, nullptr);
+    m_Graph = nullptr;
+}
+
+// Factory: raw pointer variant; caller owns the result (see Destroy).
+IDeserializer* IDeserializer::CreateRaw()
+{
+    return new Deserializer();
+}
+
+// Factory: smart-pointer variant with Destroy as the deleter.
+IDeserializerPtr IDeserializer::Create()
+{
+    return IDeserializerPtr(CreateRaw(), &IDeserializer::Destroy);
+}
+
+// Deletes a parser obtained from CreateRaw/Create.
+void IDeserializer::Destroy(IDeserializer* parser)
+{
+    delete parser;
+}
+
+// Builds an INetwork from an in-memory serialized graph buffer.
+INetworkPtr Deserializer::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
+{
+    ResetParser();
+    m_Graph = LoadGraphFromBinary(binaryContent.data(), binaryContent.size());
+    return CreateNetworkFromGraph();
+}
+
+// Builds an INetwork from a stream containing a serialized graph.
+armnn::INetworkPtr Deserializer::CreateNetworkFromBinary(std::istream& binaryContent)
+{
+    ResetParser();
+    m_Graph = LoadGraphFromBinary(binaryContent);
+    return CreateNetworkFromGraph();
+}
+
+// Verifies that 'binaryContent' holds a valid SerializedGraph flatbuffer and
+// returns a pointer into that buffer. The buffer must outlive the returned
+// GraphPtr. Throws InvalidArgumentException / ParseException on bad input.
+Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(const uint8_t* binaryContent, size_t len)
+{
+    if (binaryContent == nullptr)
+    {
+        throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
+                                                  CHECK_LOCATION().AsString()));
+    }
+    flatbuffers::Verifier verifier(binaryContent, len);
+    if (verifier.VerifyBuffer<SerializedGraph>() == false)
+    {
+        throw ParseException(
+                boost::str(boost::format("Buffer doesn't conform to the expected Armnn "
+                                         "flatbuffers format. size:%1% %2%") %
+                           len %
+                           CHECK_LOCATION().AsString()));
+    }
+    return GetSerializedGraph(binaryContent);
+}
+
+// Reads the whole stream and interprets it as a SerializedGraph flatbuffer.
+// FIXME(review): 'content' is destroyed when this function returns, so the
+// returned GraphPtr points into freed memory (and, unlike the overload above,
+// the buffer is never verified). The buffer must be kept alive by the caller
+// or stored in the parser - confirm and fix before relying on this path.
+Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(std::istream& binaryContent)
+{
+    std::string content((std::istreambuf_iterator<char>(binaryContent)), std::istreambuf_iterator<char>());
+    return GetSerializedGraph(content.data());
+}
+
+// Walks the loaded graph and builds the armnn INetwork:
+//  1. dispatch each non-input/output layer to its registered parse function,
+//  2. create the input/output layers from the graph's binding ids,
+//  3. connect every registered output slot to its consumers' input slots.
+INetworkPtr Deserializer::CreateNetworkFromGraph()
+{
+    m_Network = INetwork::Create();
+    BOOST_ASSERT(m_Graph != nullptr);
+    unsigned int layerIndex = 0;
+    // One Slots entry per layer; slots are filled in by the Register* helpers.
+    m_GraphConnections.emplace_back(m_Graph->layers()->size());
+    for (AnyLayer const* layer : *m_Graph->layers())
+    {
+        if (layer->layer_type() != Layer_InputLayer &&
+            layer->layer_type() != Layer_OutputLayer)
+        {
+            // lookup and call the parser function
+            auto& parserFunction = m_ParserFunctions[layer->layer_type()];
+            (this->*parserFunction)(layerIndex);
+        }
+        ++layerIndex;
+    }
+
+    SetupInputLayers();
+    SetupOutputLayers();
+
+    // establish the connections from the layer outputs to the inputs of the subsequent layers
+    for (size_t connectionIndex = 0; connectionIndex < m_GraphConnections[0].size(); ++connectionIndex)
+    {
+        if (m_GraphConnections[0][connectionIndex].outputSlot != nullptr)
+        {
+            for (size_t inputSlotIdx = 0;
+                 inputSlotIdx < m_GraphConnections[0][connectionIndex].inputSlots.size();
+                 ++inputSlotIdx)
+            {
+                m_GraphConnections[0][connectionIndex].outputSlot->Connect(
+                    *(m_GraphConnections[0][connectionIndex].inputSlots[inputSlotIdx]));
+            }
+        }
+    }
+
+    // m_Network is a member unique_ptr, so the explicit move is required.
+    return std::move(m_Network);
+}
+
+// Looks up the network input whose layer name equals 'name' and returns its
+// (binding id, tensor info) pair. Throws ParseException when no input matches.
+BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerIndex,
+                                                          const std::string& name) const
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetGraphInputs(m_Graph);
+
+    for (auto const& input : inputs)
+    {
+        if (input->layerName()->c_str() == name)
+        {
+            // NOTE(review): reinterpret_cast between integer types is at best
+            // a no-op here; a static_cast (or none) would express the intent.
+            int bindingId = reinterpret_cast<armnn::LayerBindingId>(GetBindingLayerInfo(m_Graph, input->index()));
+            // NOTE(review): 'layerIndex' is used as an *output slot* index on
+            // the matched input layer - callers appear to pass 0; confirm this
+            // parameter's intended meaning.
+            auto layerBase = GetBaseLayer(m_Graph,input->index())->outputSlots()->Get(layerIndex);
+            return std::make_pair(bindingId, ToTensorInfo(layerBase->tensorInfo()));
+        }
+    }
+    throw ParseException(
+        boost::str(
+            boost::format("No input binding found for layer:%1% / %2%") %
+            name %
+            CHECK_LOCATION().AsString()));
+}
+
+// Looks up the network output whose layer name equals 'name' and returns its
+// (binding id, tensor info) pair; the tensor info is taken from the layer that
+// feeds the output. Throws ParseException when no output matches.
+BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
+                                                           const std::string& name) const
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto outputs = GetGraphOutputs(m_Graph);
+
+    for (auto const& output : outputs)
+    {
+        if (output->layerName()->c_str() == name)
+        {
+            int bindingId = reinterpret_cast<armnn::LayerBindingId>(GetBindingLayerInfo(m_Graph, output->index()));
+            auto layer = GetBaseLayer(m_Graph, output->index());
+            auto sourceLayerIndex = layer->inputSlots()->Get(0)->connection()->sourceLayerIndex();
+            auto sourceLayer = GetBaseLayer(m_Graph, sourceLayerIndex);
+            return std::make_pair(bindingId, ToTensorInfo(sourceLayer->outputSlots()->Get(0)->tensorInfo()));
+        }
+    }
+    throw ParseException(
+        boost::str(
+            boost::format("No output binding found for layer:%1% / %2%") %
+            name %
+            CHECK_LOCATION().AsString()));
+}
+
+// Creates an armnn input layer for every graph input, propagates its tensor
+// info, and registers its output slots for later connection.
+void Deserializer::SetupInputLayers()
+{
+    CHECK_GRAPH(m_Graph, 0);
+    auto inputs = GetGraphInputs(m_Graph);
+    for (auto const& input : inputs)
+    {
+        IConnectableLayer* layer =
+            m_Network->AddInputLayer(GetBindingLayerInfo(m_Graph, input->index()), input->layerName()->c_str());
+
+        auto tensorInfo = ToTensorInfo(input->outputSlots()->Get(0)->tensorInfo());
+        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+        RegisterOutputSlots(input->index(), layer);
+    }
+}
+
+// Creates an armnn output layer for every graph output and registers its
+// input slots for later connection (output layers carry no tensor info).
+void Deserializer::SetupOutputLayers()
+{
+    CHECK_GRAPH(m_Graph, 0);
+    auto outputs = GetGraphOutputs(m_Graph);
+    for (auto const& output : outputs)
+    {
+        IConnectableLayer* layer =
+            m_Network->AddOutputLayer(GetBindingLayerInfo(m_Graph, output->index()), output->layerName()->c_str());
+
+        RegisterInputSlots(output->index(), layer);
+    }
+}
+
+// Records every output slot of 'layer' in the connection table under
+// 'layerIndex', after checking the slot count matches the serialized layer.
+void Deserializer::RegisterOutputSlots(uint32_t layerIndex,
+                                       IConnectableLayer* layer)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    BOOST_ASSERT(layer != nullptr);
+    auto parsedLayer = GetBaseLayer(m_Graph, layerIndex);
+    if (parsedLayer->outputSlots()->size() != layer->GetNumOutputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of outputslots (%1%) does not match the number expected (%2%)"
+                                     " for layer index: %3% %4%") %
+                       parsedLayer->outputSlots()->size() %
+                       layer->GetNumOutputSlots() %
+                       layerIndex %
+                       CHECK_LOCATION().AsString()));
+    }
+
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
+    {
+        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
+        RegisterOutputSlotOfConnection(layerIndex, slot);
+    }
+}
+
+// Records every input slot of 'layer' in the connection table under the
+// *producing* layer's index, so CreateNetworkFromGraph can connect them.
+void Deserializer::RegisterInputSlots(uint32_t layerIndex,
+                                      armnn::IConnectableLayer* layer)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    BOOST_ASSERT(layer != nullptr);
+    auto parsedLayer = GetBaseLayer(m_Graph, layerIndex);
+    if (parsedLayer->inputSlots()->size() != layer->GetNumInputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of inputslots (%1%) does not match the number expected (%2%)"
+                                     " for layer index:%3% %4%") %
+                       parsedLayer->inputSlots()->size() %
+                       layer->GetNumInputSlots() %
+                       layerIndex %
+                       CHECK_LOCATION().AsString()));
+    }
+
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
+    {
+        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
+        uint32_t sourceLayerIndex = parsedLayer->inputSlots()->Get(slotIndex)->connection()->sourceLayerIndex();
+        RegisterInputSlotOfConnection(sourceLayerIndex, slot);
+    }
+}
+
+// Appends a consumer input slot to the connection entry for 'connectionIndex'
+// (a connection may fan out to multiple consumers).
+void Deserializer::RegisterInputSlotOfConnection(uint32_t connectionIndex,
+                                                armnn::IInputSlot* slot)
+{
+    BOOST_ASSERT(m_GraphConnections[0].size() > connectionIndex);
+
+    Slots& slots = m_GraphConnections[0][connectionIndex];
+    slots.inputSlots.push_back(slot);
+}
+
+// Records the single producer output slot for 'connectionIndex'; throws if a
+// producer was already registered for that connection.
+void Deserializer::RegisterOutputSlotOfConnection(uint32_t connectionIndex,
+                                                  armnn::IOutputSlot* slot)
+{
+    BOOST_ASSERT(m_GraphConnections[0].size() > connectionIndex);
+
+    Slots& slots = m_GraphConnections[0][connectionIndex];
+
+    // assuming there is only one producer for that tensor
+    if (slots.outputSlot != nullptr)
+    {
+        throw ParseException(boost::str(
+            boost::format("Another layer has already registered itself as the producer of "
+                          "connection:%1% / %2%") %
+            connectionIndex %
+            CHECK_LOCATION().AsString()));
+    }
+
+    slots.outputSlot = slot;
+}
+
+// Deserializes an addition layer: two inputs, one output, no parameters.
+void Deserializer::ParseAdd(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    // (a stray no-op "CHECK_LOCATION();" statement was removed here - it only
+    // constructed and discarded a CheckLocation temporary)
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    m_layerName = boost::str(boost::format("Addition:%1%") % layerIndex);
+    IConnectableLayer* layer = m_Network->AddAdditionLayer(m_layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
+// Deserializes a 2D convolution: reads the descriptor, weights and
+// (optionally) biases, creates the armnn layer and wires up its slots.
+// Fixes vs. original: removed a stray no-op "CHECK_LOCATION();" statement and
+// the doubled semicolons after strideY()/biasEnabled().
+void Deserializer::ParseConvolution2d(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("Convolution2d:%1%") % layerIndex);
+
+    auto serializerLayer = m_Graph->layers()->Get(layerIndex)->layer_as_Convolution2dLayer();
+    auto serializerDescriptor = serializerLayer->descriptor();
+
+    armnn::Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft = serializerDescriptor->padLeft();
+    descriptor.m_PadRight = serializerDescriptor->padRight();
+    descriptor.m_PadTop = serializerDescriptor->padTop();
+    descriptor.m_PadBottom = serializerDescriptor->padBottom();
+    descriptor.m_StrideX = serializerDescriptor->strideX();
+    descriptor.m_StrideY = serializerDescriptor->strideY();
+    descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();
+    descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
+
+    armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
+    armnn::ConstTensor biases;
+
+    // Biases are only present in the flatbuffer when bias is enabled.
+    if (descriptor.m_BiasEnabled)
+    {
+        biases = ToConstTensor(serializerLayer->biases());
+    }
+    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(descriptor,
+                                                                weights,
+                                                                biases,
+                                                                layerName.c_str());
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
+// Deserializes a depthwise 2D convolution: reads the descriptor, weights and
+// (optionally) biases, creates the armnn layer and wires up its slots.
+// Fixes vs. original: removed a stray no-op "CHECK_LOCATION();" statement and
+// the doubled semicolons after strideY()/biasEnabled().
+void Deserializer::ParseDepthwiseConvolution2d(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("DepthwiseConvolution2d:%1%") % layerIndex);
+
+    auto serializerLayer = m_Graph->layers()->Get(layerIndex)->layer_as_DepthwiseConvolution2dLayer();
+    auto serializerDescriptor = serializerLayer->descriptor();
+
+    armnn::DepthwiseConvolution2dDescriptor descriptor;
+    descriptor.m_PadLeft = serializerDescriptor->padLeft();
+    descriptor.m_PadRight = serializerDescriptor->padRight();
+    descriptor.m_PadTop = serializerDescriptor->padTop();
+    descriptor.m_PadBottom = serializerDescriptor->padBottom();
+    descriptor.m_StrideX = serializerDescriptor->strideX();
+    descriptor.m_StrideY = serializerDescriptor->strideY();
+    descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();
+    descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
+
+    armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
+    armnn::ConstTensor biases;
+
+    // Biases are only present in the flatbuffer when bias is enabled.
+    if (descriptor.m_BiasEnabled)
+    {
+        biases = ToConstTensor(serializerLayer->biases());
+    }
+    IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(descriptor,
+                                                                         weights,
+                                                                         biases,
+                                                                         layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
+// Deserializes a multiplication layer: two inputs, one output, no parameters.
+void Deserializer::ParseMultiplication(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    // (a stray no-op "CHECK_LOCATION();" statement was removed here - it only
+    // constructed and discarded a CheckLocation temporary)
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    m_layerName = boost::str(boost::format("Multiplication:%1%") % layerIndex);
+    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(m_layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
+// Translates a serialized pooling descriptor into an armnn one. Also sets
+// m_layerName as a side effect (consumed by ParsePooling2d).
+// NOTE(review): the default branches only BOOST_ASSERT; in a release build an
+// unknown enum value would leave the corresponding descriptor field at its
+// default-constructed value rather than failing.
+armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::PoolingDescriptor pooling2dDesc,
+                                                              unsigned int layerIndex)
+{
+    armnn::Pooling2dDescriptor desc;
+
+    switch (pooling2dDesc->poolType())
+    {
+        case PoolingAlgorithm_Average:
+        {
+            desc.m_PoolType = armnn::PoolingAlgorithm::Average;
+            m_layerName = boost::str(boost::format("AveragePool2D:%1%") % layerIndex);
+            break;
+        }
+        case PoolingAlgorithm_Max:
+        {
+            desc.m_PoolType = armnn::PoolingAlgorithm::Max;
+            m_layerName = boost::str(boost::format("MaxPool2D:%1%") % layerIndex);
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported pooling algorithm");
+        }
+    }
+
+    switch (pooling2dDesc->outputShapeRounding())
+    {
+        case OutputShapeRounding_Floor:
+        {
+            desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+            break;
+        }
+        case OutputShapeRounding_Ceiling:
+        {
+            desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported output shape rounding");
+        }
+    }
+
+    switch (pooling2dDesc->paddingMethod())
+    {
+        case PaddingMethod_Exclude:
+        {
+            desc.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+            break;
+        }
+        case PaddingMethod_IgnoreValue:
+        {
+            desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported padding method");
+        }
+    }
+
+    switch (pooling2dDesc->dataLayout())
+    {
+        case DataLayout_NCHW:
+        {
+            desc.m_DataLayout = armnn::DataLayout::NCHW;
+            break;
+        }
+        case DataLayout_NHWC:
+        {
+            desc.m_DataLayout = armnn::DataLayout::NHWC;
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "Unsupported data layout");
+        }
+    }
+
+    // Remaining fields are plain copies.
+    desc.m_PadRight = pooling2dDesc->padRight();
+    desc.m_PadLeft = pooling2dDesc->padLeft();
+    desc.m_PadBottom = pooling2dDesc->padBottom();
+    desc.m_PadTop = pooling2dDesc->padTop();
+    desc.m_StrideX = pooling2dDesc->strideX();
+    desc.m_StrideY = pooling2dDesc->strideY();
+    desc.m_PoolWidth = pooling2dDesc->poolWidth();
+    desc.m_PoolHeight = pooling2dDesc->poolHeight();
+
+    return desc;
+}
+
+// Deserializes a 2D pooling layer. The layer name comes from m_layerName,
+// which GetPoolingDescriptor sets based on the pooling algorithm.
+void Deserializer::ParsePooling2d(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+
+    auto pooling2dDes = m_Graph->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->descriptor();
+
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+    auto outputInfo = ToTensorInfo(outputs[0]);
+
+    auto pooling2dDescriptor = GetPoolingDescriptor(pooling2dDes, layerIndex);
+
+    IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, m_layerName.c_str());
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
+// Computes the output TensorInfo for a reshape, resolving at most one "-1"
+// wildcard dimension from the input's total element count.
+armnn::TensorInfo Deserializer::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
+                                                     const std::vector<uint32_t>& targetDimsIn)
+{
+    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
+    // -1 converts to uint32_t max for the comparison against the uint32 dims.
+    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
+
+    if (stretchDim != targetDimsIn.end())
+    {
+        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
+        {
+            throw ParseException(boost::str(
+                boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
+        }
+
+        // Starting the product at -1 cancels the -1 wildcard element, leaving
+        // the product of the known dimensions.
+        auto targetNumElements =
+            boost::numeric_cast<unsigned int>(
+                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
+
+        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
+        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
+    }
+
+    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
+
+    // Keep data type / quantization from the input; only the shape changes.
+    armnn::TensorInfo reshapeInfo = inputTensorInfo;
+    reshapeInfo.SetShape(outputShape);
+
+    return reshapeInfo;
+}
+
+// Deserializes a reshape layer: derives the output shape from the serialized
+// target shape (resolving a possible -1 wildcard) and creates the armnn layer.
+void Deserializer::ParseReshape(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetInputs(m_Graph, layerIndex);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
+
+    const auto targetDims = m_Graph->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->descriptor()->targetShape();
+    std::vector<uint32_t> outputDims(targetDims->begin(), targetDims->begin() + targetDims->size());
+
+    armnn::TensorInfo reshapeOutputTensorInfo = Deserializer::OutputShapeOfReshape(inputTensorInfo, outputDims);
+    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
+
+    const std::vector<uint32_t> expectedDims(outputs[0]->dimensions()->begin(),
+                                             outputs[0]->dimensions()->begin() + outputs[0]->dimensions()->size());
+
+    // NOTE(review): a reshape layer has a single input here, so the
+    // 'inputs.size() > 1' condition means this shape validation can never
+    // fire - confirm whether 'inputs.size() >= 1' (or no size guard) was
+    // intended.
+    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, expectedDims))
+    {
+        std::stringstream ss;
+        ss << "New shape defined in reshape parameters "
+           << reshapeOutputTensorShape
+           << " does not equal output shape "
+           << actualOutputTensorInfo.GetShape()
+           << ": "
+           << CHECK_LOCATION().AsString();
+        throw ParseException(ss.str());
+    }
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = reshapeOutputTensorShape;
+
+    auto layerName = boost::str(boost::format("Reshape:%1%") % layerIndex);
+    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
+    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
+// Deserializes a softmax layer: reads beta from the serialized descriptor,
+// creates the armnn layer and wires up its slots.
+void Deserializer::ParseSoftmax(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::SoftmaxDescriptor descriptor;
+    descriptor.m_Beta = m_Graph->layers()->Get(layerIndex)->layer_as_SoftmaxLayer()->descriptor()->beta();
+
+    const std::string softmaxLayerName = boost::str(boost::format("Softmax:%1%") % layerIndex);
+    IConnectableLayer* softmaxLayer = m_Network->AddSoftmaxLayer(descriptor, softmaxLayerName.c_str());
+
+    softmaxLayer->GetOutputSlot(0).SetTensorInfo(ToTensorInfo(outputs[0]));
+
+    RegisterInputSlots(layerIndex, softmaxLayer);
+    RegisterOutputSlots(layerIndex, softmaxLayer);
+}
+
+} // namespace armnnDeserializer
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
new file mode 100644
index 0000000000..a66508a158
--- /dev/null
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/INetwork.hpp"
+#include "armnnDeserializer/IDeserializer.hpp"
+#include <Schema_generated.h>
+
+namespace armnnDeserializer
+{
+/// IDeserializer implementation: rebuilds an armnn::INetwork from a
+/// flatbuffers-serialized graph (see Schema_generated.h).
+class Deserializer : public IDeserializer
+{
+public:
+    // Shorthands for deserializer types
+    using ConstTensorRawPtr = const armnnSerializer::ConstTensor *;
+    using GraphPtr = const armnnSerializer::SerializedGraph *;
+    using TensorRawPtr = const armnnSerializer::TensorInfo *;
+    using PoolingDescriptor = const armnnSerializer::Pooling2dDescriptor *;
+    using TensorRawPtrVector = std::vector<TensorRawPtr>;
+    using LayerRawPtr = const armnnSerializer::LayerBase *;
+    using LayerBaseRawPtr = const armnnSerializer::LayerBase *;
+    using LayerBaseRawPtrVector = std::vector<LayerBaseRawPtr>;
+
+public:
+
+    /// Create an input network from binary file contents
+    armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent) override;
+
+    /// Create an input network from a binary input stream
+    armnn::INetworkPtr CreateNetworkFromBinary(std::istream& binaryContent) override;
+
+    /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name
+    BindingPointInfo GetNetworkInputBindingInfo(unsigned int layerId, const std::string& name) const override;
+
+    /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name
+    BindingPointInfo GetNetworkOutputBindingInfo(unsigned int layerId, const std::string& name) const override;
+
+    Deserializer();
+    ~Deserializer() {}
+
+public:
+    // testable helpers
+    static GraphPtr LoadGraphFromBinary(const uint8_t* binaryContent, size_t len);
+    static GraphPtr LoadGraphFromBinary(std::istream& binaryContent);
+    static TensorRawPtrVector GetInputs(const GraphPtr& graph, unsigned int layerIndex);
+    static TensorRawPtrVector GetOutputs(const GraphPtr& graph, unsigned int layerIndex);
+    static LayerBaseRawPtrVector GetGraphInputs(const GraphPtr& graphPtr);
+    static LayerBaseRawPtrVector GetGraphOutputs(const GraphPtr& graphPtr);
+    static LayerBaseRawPtr GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex);
+    static int32_t GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex);
+    armnn::Pooling2dDescriptor GetPoolingDescriptor(PoolingDescriptor pooling2dDescriptor,
+                                                    unsigned int layerIndex);
+    static armnn::TensorInfo OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
+                                                  const std::vector<uint32_t> & targetDimsIn);
+
+private:
+    // No copying allowed until it is wanted and properly implemented
+    Deserializer(const Deserializer&) = delete;
+    Deserializer& operator=(const Deserializer&) = delete;
+
+    /// Create the network from an already loaded flatbuffers graph
+    armnn::INetworkPtr CreateNetworkFromGraph();
+
+    // signature for the parser functions
+    using LayerParsingFunction = void(Deserializer::*)(unsigned int layerIndex);
+
+    // One Parse* member per supported serialized layer type.
+    void ParseUnsupportedLayer(unsigned int layerIndex);
+    void ParseAdd(unsigned int layerIndex);
+    void ParseConvolution2d(unsigned int layerIndex);
+    void ParseDepthwiseConvolution2d(unsigned int layerIndex);
+    void ParseMultiplication(unsigned int layerIndex);
+    void ParsePooling2d(unsigned int layerIndex);
+    void ParseReshape(unsigned int layerIndex);
+    void ParseSoftmax(unsigned int layerIndex);
+
+    void RegisterOutputSlotOfConnection(uint32_t connectionIndex, armnn::IOutputSlot* slot);
+    void RegisterInputSlotOfConnection(uint32_t connectionIndex, armnn::IInputSlot* slot);
+    void RegisterInputSlots(uint32_t layerIndex,
+                            armnn::IConnectableLayer* layer);
+    void RegisterOutputSlots(uint32_t layerIndex,
+                             armnn::IConnectableLayer* layer);
+    void ResetParser();
+
+    void SetupInputLayers();
+    void SetupOutputLayers();
+
+    /// The network we're building. Gets cleared after it is passed to the user
+    armnn::INetworkPtr m_Network;
+    /// The serialized graph currently being parsed (raw flatbuffers pointer).
+    GraphPtr m_Graph;
+    /// Table of layer-parsing member functions — presumably indexed by the
+    /// serialized layer type; see Deserializer.cpp for the mapping. TODO confirm.
+    std::vector<LayerParsingFunction> m_ParserFunctions;
+    // NOTE(review): m_layerName is not referenced anywhere in this header;
+    // confirm it is still used in Deserializer.cpp.
+    std::string m_layerName;
+
+    /// A mapping of an output slot to each of the input slots it should be connected to
+    /// The outputSlot is from the layer that creates this tensor as one of its outputs
+    /// The inputSlots are from the layers that use this tensor as one of their inputs
+    struct Slots
+    {
+        armnn::IOutputSlot* outputSlot;
+        std::vector<armnn::IInputSlot*> inputSlots;
+
+        Slots() : outputSlot(nullptr) { }
+    };
+    typedef std::vector<Slots> Connection;
+    std::vector<Connection> m_GraphConnections;
+};
+
+} //namespace armnnDeserializer
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
new file mode 100644
index 0000000000..86d3d02415
--- /dev/null
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -0,0 +1,18 @@
+# The layers that ArmNN SDK Deserializer currently supports.
+
+This reference guide provides a list of layers which can currently be deserialized by the Arm NN SDK.
+
+## Fully supported
+
+The Arm NN SDK Deserializer currently supports the following layers:
+
+* Addition
+* Convolution2d
+* DepthwiseConvolution2d
+* FullyConnected
+* Multiplication
+* Pooling2d
+* Reshape
+* Softmax
+
+More machine learning layers will be supported in future releases.
diff --git a/src/armnnDeserializer/README.md b/src/armnnDeserializer/README.md
new file mode 100644
index 0000000000..28d6a37388
--- /dev/null
+++ b/src/armnnDeserializer/README.md
@@ -0,0 +1,7 @@
+# The Arm NN Deserializer
+
+The `armnnDeserializer` is a library for loading neural networks defined by Arm NN FlatBuffers files
+into the Arm NN runtime.
+
+For more information about the layers that are supported, and the networks that have been tested,
+see [DeserializerSupport.md](./DeserializerSupport.md) \ No newline at end of file
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
new file mode 100644
index 0000000000..b053b10efa
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeAdd.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct AddFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit AddFixture(const std::string & inputShape1,
+ const std::string & inputShape2,
+ const std::string & outputShape,
+ const std::string & dataType,
+ const std::string & activation="NONE")
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0, 1],
+ outputIds: [3],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer1",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape1 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },}},
+ },
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 1,
+ base: {
+ index:1,
+ layerName: "InputLayer2",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape2 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },}},
+ },
+ {
+ layer_type: "AdditionLayer",
+ layer : {
+ base: {
+ index:2,
+ layerName: "AdditionLayer",
+ layerType: "Addition",
+ inputSlots: [
+ {
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ },
+ {
+ index: 1,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }
+ ],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }},
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 0,
+ base: {
+ index: 3,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }}},
+ }]
+ }
+ )";
+ Setup();
+ }
+};
+
+
+// 2x2 QuantisedAsymm8 tensors: element-wise addition of two quantised inputs.
+struct SimpleAddFixture : AddFixture
+{
+    SimpleAddFixture() : AddFixture("[ 2, 2 ]",
+                                    "[ 2, 2 ]",
+                                    "[ 2, 2 ]",
+                                    "QuantisedAsymm8") {}
+};
+
+// 2x2x1x1 Float32 tensors: element-wise addition of two float inputs.
+struct SimpleAddFixture2 : AddFixture
+{
+    SimpleAddFixture2() : AddFixture("[ 2, 2, 1, 1 ]",
+                                     "[ 2, 2, 1, 1 ]",
+                                     "[ 2, 2, 1, 1 ]",
+                                     "Float32") {}
+};
+
+// { 0, 1, 2, 3 } + { 4, 5, 6, 7 } = { 4, 6, 8, 10 } element-wise.
+BOOST_FIXTURE_TEST_CASE(AddQuantisedAsymm8, SimpleAddFixture)
+{
+    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+        0,
+        {{"InputLayer1", { 0, 1, 2, 3 }},
+         {"InputLayer2", { 4, 5, 6, 7 }}},
+        {{"OutputLayer", { 4, 6, 8, 10 }}});
+}
+
+// { 111, 85, 226, 3 } + { 5, 8, 10, 12 } = { 116, 93, 236, 15 } element-wise.
+BOOST_FIXTURE_TEST_CASE(AddFloat32, SimpleAddFixture2)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer1", { 111, 85, 226, 3 }},
+         {"InputLayer2", { 5, 8, 10, 12 }}},
+        {{"OutputLayer", { 116, 93, 236, 15 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnDeserializer/test/DeserializeConvolution2d.cpp b/src/armnnDeserializer/test/DeserializeConvolution2d.cpp
new file mode 100644
index 0000000000..86f7cac3bb
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeConvolution2d.cpp
@@ -0,0 +1,142 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct Convolution2dFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit Convolution2dFixture(const std::string & inputShape1,
+ const std::string & outputShape,
+ const std::string & weightsShape,
+ const std::string & dataType)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [{
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape1 + R"(,
+ dataType: )" + dataType + R"(,
+ quantizationScale: 0.5,
+ quantizationOffset: 0
+ },
+ }]
+ },
+ }
+ },
+ },
+ {
+ layer_type: "Convolution2dLayer",
+ layer : {
+ base: {
+ index:1,
+ layerName: "Convolution2dLayer",
+ layerType: "Convolution2d",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },
+ descriptor: {
+ padLeft: 1,
+ padRight: 1,
+ padTop: 1,
+ padBottom: 1,
+ strideX: 2,
+ strideY: 2,
+ biasEnabled: false,
+ dataLayout: NHWC
+ },
+ weights: {
+ info: {
+ dimensions: )" + weightsShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ data_type: IntData,
+ data: {
+ data: [
+ 1082130432, 1084227584, 1086324736,
+ 0 ,0 ,0 ,
+ 1077936128, 1073741824, 1065353216
+ ],
+ }
+ }
+ },
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 0,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }
+ }},
+ }]
+ }
+ )";
+ Setup();
+ }
+};
+
+// Float32 NHWC convolution: 1x5x5x1 input, 3x3 weights, producing a 1x3x3x1 output
+// (stride 2, padding 1 per the fixture JSON above).
+struct SimpleConvolution2dFixture : Convolution2dFixture
+{
+    SimpleConvolution2dFixture() : Convolution2dFixture("[ 1, 5, 5, 1 ]",
+                                                        "[ 1, 3, 3, 1 ]",
+                                                        "[ 1, 3, 3, 1 ]",
+                                                        "Float32") {}
+};
+
+// Runs the 5x5 input through the fixture's convolution and checks the 3x3 result
+// against precomputed reference values.
+BOOST_FIXTURE_TEST_CASE(Convolution2dFloat32, SimpleConvolution2dFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer", {1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9, 2}}},
+        {{"OutputLayer", {23, 33, 24, 91, 99, 48, 26, 50, 19}}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
new file mode 100644
index 0000000000..a9dbfbf7da
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct MultiplicationFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit MultiplicationFixture(const std::string & inputShape1,
+ const std::string & inputShape2,
+ const std::string & outputShape,
+ const std::string & dataType,
+ const std::string & activation="NONE")
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0, 1],
+ outputIds: [3],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer1",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape1 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },}},
+ },
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 1,
+ base: {
+ index:1,
+ layerName: "InputLayer2",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape2 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },}},
+ },
+ {
+ layer_type: "MultiplicationLayer",
+ layer : {
+ base: {
+ index:2,
+ layerName: "MultiplicationLayer",
+ layerType: "Multiplication",
+ inputSlots: [
+ {
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ },
+ {
+ index: 1,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }
+ ],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }},
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 0,
+ base: {
+ index: 3,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }}},
+ }]
+ }
+ )";
+ Setup();
+ }
+};
+
+
+// 2x2 QuantisedAsymm8 tensors multiplied element-wise.
+struct SimpleMultiplicationFixture : MultiplicationFixture
+{
+    SimpleMultiplicationFixture() : MultiplicationFixture("[ 2, 2 ]",
+                                                          "[ 2, 2 ]",
+                                                          "[ 2, 2 ]",
+                                                          "QuantisedAsymm8") {}
+};
+
+// 2x2x1x1 Float32 tensors multiplied element-wise.
+struct SimpleMultiplicationFixture2 : MultiplicationFixture
+{
+    SimpleMultiplicationFixture2() : MultiplicationFixture("[ 2, 2, 1, 1 ]",
+                                                           "[ 2, 2, 1, 1 ]",
+                                                           "[ 2, 2, 1, 1 ]",
+                                                           "Float32") {}
+};
+
+// { 0, 1, 2, 3 } * { 4, 5, 6, 7 } = { 0, 5, 12, 21 } element-wise.
+BOOST_FIXTURE_TEST_CASE(MultiplicationQuantisedAsymm8, SimpleMultiplicationFixture)
+{
+    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+        0,
+        {{"InputLayer1", { 0, 1, 2, 3 }},
+         {"InputLayer2", { 4, 5, 6, 7 }}},
+        {{"OutputLayer", { 0, 5, 12, 21 }}});
+}
+
+// { 100, 40, 226, 9 } * { 5, 8, 1, 12 } = { 500, 320, 226, 108 } element-wise.
+BOOST_FIXTURE_TEST_CASE(MultiplicationFloat32, SimpleMultiplicationFixture2)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer1", { 100, 40, 226, 9 }},
+         {"InputLayer2", { 5, 8, 1, 12 }}},
+        {{"OutputLayer", { 500, 320, 226, 108 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnDeserializer/test/DeserializePooling2d.cpp b/src/armnnDeserializer/test/DeserializePooling2d.cpp
new file mode 100644
index 0000000000..ef30a84342
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializePooling2d.cpp
@@ -0,0 +1,162 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct Pooling2dFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit Pooling2dFixture(const std::string &inputShape,
+ const std::string &outputShape,
+ const std::string &dataType,
+ const std::string &dataLayout,
+ const std::string &poolingAlgorithm)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(
+ }}]
+ }
+ }}},
+ {
+ layer_type: "Pooling2dLayer",
+ layer: {
+ base: {
+ index: 1,
+ layerName: "Pooling2dLayer",
+ layerType: "Pooling2d",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+
+ }}]},
+ descriptor: {
+ poolType: )" + poolingAlgorithm + R"(,
+ outputShapeRounding: "Floor",
+ paddingMethod: Exclude,
+ dataLayout: )" + dataLayout + R"(,
+ padLeft: 0,
+ padRight: 0,
+ padTop: 0,
+ padBottom: 0,
+ poolWidth: 2,
+ poolHeight: 2,
+ strideX: 2,
+ strideY: 2
+ }
+ }},
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 0,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }}},
+ }]
+ }
+ )";
+ SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+ }
+};
+
+// 1x2x2x1 Float32 NHWC input, reduced to a single element by 2x2 average pooling.
+struct SimpleAvgPoolingFixture : Pooling2dFixture
+{
+    SimpleAvgPoolingFixture() : Pooling2dFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]",
+                                                 "Float32", "NHWC", "Average") {}
+};
+
+// 1x2x2x1 QuantisedAsymm8 NHWC input, reduced to a single element by 2x2 average pooling.
+struct SimpleAvgPoolingFixture2 : Pooling2dFixture
+{
+    SimpleAvgPoolingFixture2() : Pooling2dFixture("[ 1, 2, 2, 1 ]",
+                                                  "[ 1, 1, 1, 1 ]",
+                                                  "QuantisedAsymm8", "NHWC", "Average") {}
+};
+
+// 1x1x2x2 Float32 NCHW input, reduced to a single element by 2x2 max pooling.
+struct SimpleMaxPoolingFixture : Pooling2dFixture
+{
+    SimpleMaxPoolingFixture() : Pooling2dFixture("[ 1, 1, 2, 2 ]",
+                                                 "[ 1, 1, 1, 1 ]",
+                                                 "Float32", "NCHW", "Max") {}
+};
+
+// 1x1x2x2 QuantisedAsymm8 NCHW input, reduced to a single element by 2x2 max pooling.
+struct SimpleMaxPoolingFixture2 : Pooling2dFixture
+{
+    SimpleMaxPoolingFixture2() : Pooling2dFixture("[ 1, 1, 2, 2 ]",
+                                                  "[ 1, 1, 1, 1 ]",
+                                                  "QuantisedAsymm8", "NCHW", "Max") {}
+};
+
+// Fix: the original test names had the data types swapped relative to their
+// fixtures (SimpleAvgPoolingFixture is the Float32 fixture, and
+// SimpleAvgPoolingFixture2 is the QuantisedAsymm8 one).
+// Average pooling of { 2, 3, 5, 2 } over a 2x2 window yields the mean value 3.
+BOOST_FIXTURE_TEST_CASE(PoolingFloat32Avg, SimpleAvgPoolingFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0, { 2, 3, 5, 2 }, { 3 });
+}
+
+// Average pooling of { 20, 40, 60, 80 } over a 2x2 window yields the mean value 50.
+BOOST_FIXTURE_TEST_CASE(PoolingQuantisedAsymm8Avg, SimpleAvgPoolingFixture2)
+{
+    RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+                                                 { 20, 40, 60, 80 },
+                                                 { 50 });
+}
+
+// Fix: the original test names had the data types swapped relative to their
+// fixtures (SimpleMaxPoolingFixture is the Float32 fixture, and
+// SimpleMaxPoolingFixture2 is the QuantisedAsymm8 one).
+// Max pooling of { 2, 5, 5, 2 } over a 2x2 window yields the maximum value 5.
+BOOST_FIXTURE_TEST_CASE(PoolingFloat32Max, SimpleMaxPoolingFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0, { 2, 5, 5, 2 }, { 5 });
+}
+
+// Max pooling of { 20, 40, 60, 80 } over a 2x2 window yields the maximum value 80.
+BOOST_FIXTURE_TEST_CASE(PoolingQuantisedAsymm8Max, SimpleMaxPoolingFixture2)
+{
+    RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+                                                 { 20, 40, 60, 80 },
+                                                 { 80 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
diff --git a/src/armnnDeserializer/test/DeserializeReshape.cpp b/src/armnnDeserializer/test/DeserializeReshape.cpp
new file mode 100644
index 0000000000..301d8986c0
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeReshape.cpp
@@ -0,0 +1,128 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct ReshapeFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit ReshapeFixture(const std::string &inputShape,
+ const std::string &targetShape,
+ const std::string &outputShape,
+ const std::string &dataType)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(
+ }}]
+ }
+ }}},
+ {
+ layer_type: "ReshapeLayer",
+ layer: {
+ base: {
+ index: 1,
+ layerName: "ReshapeLayer",
+ layerType: "Reshape",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(
+
+ }}]},
+ descriptor: {
+ targetShape: )" + targetShape + R"(,
+ }
+
+ }},
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 2,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }}},
+ }]
+ }
+ )";
+ SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+ }
+};
+
+// Reshapes a 1x9 QuantisedAsymm8 tensor to 3x3; element values are unchanged.
+struct SimpleReshapeFixture : ReshapeFixture
+{
+    SimpleReshapeFixture() : ReshapeFixture("[ 1, 9 ]", "[ 3, 3 ]", "[ 3, 3 ]",
+                                            "QuantisedAsymm8") {}
+};
+
+// Identity reshape: 2x2x1x1 Float32 in and out.
+struct SimpleReshapeFixture2 : ReshapeFixture
+{
+    SimpleReshapeFixture2() : ReshapeFixture("[ 2, 2, 1, 1 ]",
+                                             "[ 2, 2, 1, 1 ]",
+                                             "[ 2, 2, 1, 1 ]",
+                                             "Float32") {}
+};
+
+// Reshape preserves data: the nine input values appear unchanged in the output.
+BOOST_FIXTURE_TEST_CASE(ReshapeQuantisedAsymm8, SimpleReshapeFixture)
+{
+    RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+}
+
+// Identity reshape: the four input values pass through unchanged.
+BOOST_FIXTURE_TEST_CASE(ReshapeFloat32, SimpleReshapeFixture2)
+{
+    RunTest<4, armnn::DataType::Float32>(0,
+                                         { 111, 85, 226, 3 },
+                                         { 111, 85, 226, 3 });
+}
+
+
+BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
new file mode 100644
index 0000000000..42ab2b17d6
--- /dev/null
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -0,0 +1,199 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "SchemaSerialize.hpp"
+
+#include <armnn/IRuntime.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+
+#include "TypeUtils.hpp"
+#include "test/TensorHelpers.hpp"
+
+#include "flatbuffers/idl.h"
+#include "flatbuffers/util.h"
+
+#include <Schema_generated.h>
+
+using armnnDeserializer::IDeserializer;
+using TensorRawPtr = armnnSerializer::TensorInfo*;
+
+/// Base fixture for deserializer unit tests: converts the JSON graph description in
+/// m_JsonString into a flatbuffers binary, deserializes it into an Arm NN network
+/// and loads that network onto the CpuRef backend for execution.
+struct ParserFlatbuffersSerializeFixture
+{
+    ParserFlatbuffersSerializeFixture() :
+        m_Parser(IDeserializer::Create()),
+        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
+        m_NetworkIdentifier(-1)
+    {
+    }
+
+    std::vector<uint8_t> m_GraphBinary;
+    std::string m_JsonString;
+    std::unique_ptr<IDeserializer, void (*)(IDeserializer* parser)> m_Parser;
+    armnn::IRuntimePtr m_Runtime;
+    armnn::NetworkId m_NetworkIdentifier;
+
+    /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
+    /// so they don't need to be passed to the single-input-single-output overload of RunTest().
+    std::string m_SingleInputName;
+    std::string m_SingleOutputName;
+
+    /// Converts m_JsonString to a flatbuffers binary, deserializes it and loads the
+    /// resulting network into m_Runtime under m_NetworkIdentifier.
+    void Setup()
+    {
+        bool ok = ReadStringToBinary();
+        if (!ok)
+        {
+            throw armnn::Exception("LoadNetwork failed while reading binary input");
+        }
+
+        armnn::INetworkPtr network =
+            m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+
+        if (!network)
+        {
+            throw armnn::Exception("The parser failed to create an ArmNN network");
+        }
+
+        // Tests always run against the reference backend.
+        auto optimized = Optimize(*network, {armnn::Compute::CpuRef},
+                                  m_Runtime->GetDeviceSpec());
+
+        std::string errorMessage;
+        armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+
+        if (ret != armnn::Status::Success)
+        {
+            throw armnn::Exception(
+                boost::str(
+                    boost::format("The runtime failed to load the network. "
+                                  "Error was: %1%. in %2% [%3%:%4%]") %
+                    errorMessage %
+                    __func__ %
+                    __FILE__ %
+                    __LINE__));
+        }
+
+    }
+
+    void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName)
+    {
+        // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
+        m_SingleInputName = inputName;
+        m_SingleOutputName = outputName;
+        Setup();
+    }
+
+    /// Parses the serializer schema (linked in via SchemaSerialize.s), then parses
+    /// m_JsonString against it and stores the resulting flatbuffers binary in
+    /// m_GraphBinary. Returns false if either parse fails.
+    bool ReadStringToBinary()
+    {
+        std::string schemafile(&deserialize_schema_start, &deserialize_schema_end);
+
+        // parse schema first, so we can use it to parse the data after
+        flatbuffers::Parser parser;
+
+        bool ok = parser.Parse(schemafile.c_str());
+        BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+
+        ok &= parser.Parse(m_JsonString.c_str());
+        BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+
+        if (!ok)
+        {
+            return false;
+        }
+
+        {
+            const uint8_t* bufferPtr = parser.builder_.GetBufferPointer();
+            size_t size = static_cast<size_t>(parser.builder_.GetSize());
+            m_GraphBinary.assign(bufferPtr, bufferPtr+size);
+        }
+        return ok;
+    }
+
+    /// Executes the network with the given input tensor and checks the result against the given output tensor.
+    /// This overload assumes the network has a single input and a single output.
+    template <std::size_t NumOutputDimensions,
+              armnn::DataType ArmnnType,
+              typename DataType = armnn::ResolveType<ArmnnType>>
+    void RunTest(unsigned int layersId,
+                 const std::vector<DataType>& inputData,
+                 const std::vector<DataType>& expectedOutputData);
+
+    /// Executes the network with the given input tensors and checks the results against the given output tensors.
+    /// This overload supports multiple inputs and multiple outputs, identified by name.
+    template <std::size_t NumOutputDimensions,
+              armnn::DataType ArmnnType,
+              typename DataType = armnn::ResolveType<ArmnnType>>
+    void RunTest(unsigned int layersId,
+                 const std::map<std::string, std::vector<DataType>>& inputData,
+                 const std::map<std::string, std::vector<DataType>>& expectedOutputData);
+
+    /// Checks a serialized TensorInfo against the expected shape, data type and
+    /// quantization parameters.
+    void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
+                      armnnSerializer::TensorInfo tensorType, const std::string& name,
+                      const float scale, const int64_t zeroPoint)
+    {
+        BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
+        BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
+                                      tensors->dimensions()->begin(), tensors->dimensions()->end());
+        BOOST_CHECK_EQUAL(tensorType.dataType(), tensors->dataType());
+        BOOST_CHECK_EQUAL(scale, tensors->quantizationScale());
+        BOOST_CHECK_EQUAL(zeroPoint, tensors->quantizationOffset());
+    }
+};
+
+template <std::size_t NumOutputDimensions,
+          armnn::DataType ArmnnType,
+          typename DataType>
+void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
+                                                const std::vector<DataType>& inputData,
+                                                const std::vector<DataType>& expectedOutputData)
+{
+    // Forward to the multi-tensor overload using the names stored by
+    // SetupSingleInputSingleOutput().
+    RunTest<NumOutputDimensions, ArmnnType>(layersId,
+                                            { { m_SingleInputName, inputData } },
+                                            { { m_SingleOutputName, expectedOutputData } });
+}
+
+// Runs the network loaded by Setup() with the given named inputs and compares
+// each named output against its expected values.
+template <std::size_t NumOutputDimensions,
+          armnn::DataType ArmnnType,
+          typename DataType>
+void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
+                                                const std::map<std::string, std::vector<DataType>>& inputData,
+                                                const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+{
+    using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+
+    // Setup the armnn input tensors from the given vectors.
+    armnn::InputTensors inputTensors;
+    for (auto&& it : inputData)
+    {
+        BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(layersId, it.first);
+        // Verify the binding's data type matches the test's template argument.
+        armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
+        inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+    }
+
+    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
+    std::map<std::string, boost::multi_array<DataType, NumOutputDimensions>> outputStorage;
+    armnn::OutputTensors outputTensors;
+    for (auto&& it : expectedOutputData)
+    {
+        BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(layersId, it.first);
+        armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
+        outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
+        outputTensors.push_back(
+            { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
+    }
+
+    // Run inference.
+    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+
+    // Compare each output tensor to the expected values
+    for (auto&& it : expectedOutputData)
+    {
+        BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(layersId, it.first);
+        auto outputExpected = MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second, it.second);
+        BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+    }
+}
diff --git a/src/armnnDeserializer/test/SchemaSerialize.hpp b/src/armnnDeserializer/test/SchemaSerialize.hpp
new file mode 100644
index 0000000000..ec7e6bab6a
--- /dev/null
+++ b/src/armnnDeserializer/test/SchemaSerialize.hpp
@@ -0,0 +1,9 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+extern "C" {
+extern const char deserialize_schema_start;
+extern const char deserialize_schema_end;
+}
diff --git a/src/armnnDeserializer/test/SchemaSerialize.s b/src/armnnDeserializer/test/SchemaSerialize.s
new file mode 100644
index 0000000000..dbbb7db3e5
--- /dev/null
+++ b/src/armnnDeserializer/test/SchemaSerialize.s
@@ -0,0 +1,13 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+.section .rodata
+
+.global deserialize_schema_start
+.global deserialize_schema_end
+
+deserialize_schema_start:
+.incbin ARMNN_SERIALIZER_SCHEMA_PATH
+deserialize_schema_end: