aboutsummaryrefslogtreecommitdiff
path: root/src/armnnTfLiteParser
diff options
context:
space:
mode:
authortelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
committertelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
commitc577f2c6a3b4ddb6ba87a882723c53a248afbeba (patch)
treebd7d4c148df27f8be6649d313efb24f536b7cf34 /src/armnnTfLiteParser
parent4c7098bfeab1ffe1cdc77f6c15548d3e73274746 (diff)
downloadarmnn-c577f2c6a3b4ddb6ba87a882723c53a248afbeba.tar.gz
Release 18.08
Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--src/armnnTfLiteParser/README.md7
-rw-r--r--src/armnnTfLiteParser/TensorFlowLiteSupport.md27
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.cpp1440
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.hpp156
-rw-r--r--src/armnnTfLiteParser/test/AvgPool2D.cpp119
-rw-r--r--src/armnnTfLiteParser/test/Conv2D.cpp351
-rw-r--r--src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp199
-rw-r--r--src/armnnTfLiteParser/test/GetBuffer.cpp126
-rw-r--r--src/armnnTfLiteParser/test/GetInputsOutputs.cpp239
-rw-r--r--src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp230
-rw-r--r--src/armnnTfLiteParser/test/GetTensorIds.cpp162
-rw-r--r--src/armnnTfLiteParser/test/InputOutputTensorNames.cpp138
-rw-r--r--src/armnnTfLiteParser/test/LoadModel.cpp241
-rw-r--r--src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp61
-rw-r--r--src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp229
-rw-r--r--src/armnnTfLiteParser/test/Softmax.cpp78
-rw-r--r--src/armnnTfLiteParser/test/Squeeze.cpp144
17 files changed, 3947 insertions, 0 deletions
diff --git a/src/armnnTfLiteParser/README.md b/src/armnnTfLiteParser/README.md
new file mode 100644
index 0000000000..aeb79eee46
--- /dev/null
+++ b/src/armnnTfLiteParser/README.md
@@ -0,0 +1,7 @@
+# The Arm NN TensorFlow Lite parser
+
+`armnnTfLiteParser` is a library for loading neural networks defined by TensorFlow Lite FlatBuffers files
+into the Arm NN runtime.
+
+For more information about the TensorFlow Lite operators that are supported, and the networks that have been tested,
+see [TensorFlowLiteSupport.md](./TensorFlowLiteSupport.md) \ No newline at end of file
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
new file mode 100644
index 0000000000..8a58147fcb
--- /dev/null
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -0,0 +1,27 @@
+# TensorFlow Lite operators that the Arm NN SDK supports
+
+This reference guide provides a list of TensorFlow Lite operators the Arm NN SDK currently supports.
+
+The Arm NN SDK TensorFlow Lite parser currently only supports uint8.
+
+## Fully supported
+
+The Arm NN SDK TensorFlow Lite parser currently supports the following operators:
+
+* AVERAGE_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
+
+* CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
+
+* DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
+
+* SOFTMAX
+
+* SQUEEZE
+
+## Tested networks
+
+Arm tested these operators with the following TensorFlow Lite neural network:
+
+* [Quantized MobileNet](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz)
+
+More machine learning operators will be supported in future releases. \ No newline at end of file
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
new file mode 100644
index 0000000000..d5c48a10e2
--- /dev/null
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -0,0 +1,1440 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "TfLiteParser.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Exceptions.hpp>
+#include <armnn/TypesUtils.hpp>
+#include <boost/filesystem.hpp>
+
+// armnnUtils:
+#include <Permute.hpp>
+#include <VerificationHelpers.hpp>
+
+// The generated code based on the Tf Lite schema:
+#include <schema_generated.h>
+
+#include <boost/core/ignore_unused.hpp>
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+#include <boost/log/trivial.hpp>
+
+#include <fstream>
+#include <algorithm>
+#include <limits>
+
+using namespace armnn;
+using armnn::CheckLocation;
+namespace armnnTfLiteParser
+{
+namespace
+{
+const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
+const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
+
+const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
+
+void CheckSubgraph(const TfLiteParser::ModelPtr & model,
+ size_t subgraphIndex,
+ const CheckLocation & location)
+{
+ if (model.get() == nullptr)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with invalid (null) model. "
+ "Possible reason is that the model is not yet loaded and Unpack(ed). "
+ "subgraph:%2% at %3%") %
+ location.m_Function %
+ subgraphIndex %
+ location.FileLine()));
+ }
+ else if (subgraphIndex >= model->subgraphs.size())
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with an invalid subgraph index. "
+ "subgraph:%2% at %3%") %
+ location.m_Function %
+ subgraphIndex %
+ location.FileLine()));
+ }
+}
+
+#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
+ CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
+
+void CheckModel(const TfLiteParser::ModelPtr & model,
+ size_t subgraphIndex,
+ size_t operatorIndex,
+ const CheckLocation & location)
+{
+ if (model.get() == nullptr)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with invalid (null) model. "
+ "Possible reason is that the model is not yet loaded and Unpack(ed). "
+ "subgraph:%2% operator:%3% at %4%") %
+ location.m_Function %
+ subgraphIndex %
+ operatorIndex %
+ location.FileLine()));
+ }
+ else if (subgraphIndex >= model->subgraphs.size())
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with an invalid subgraph index. "
+ "subgraph:%2% operator:%3% at %4%") %
+ location.m_Function %
+ subgraphIndex %
+ operatorIndex %
+ location.FileLine()));
+ }
+ else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
+ operatorIndex != VIRTUAL_OPERATOR_ID)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with an invalid operator index. "
+ "subgraph:%2% operator:%3% at %4%") %
+ location.m_Function %
+ subgraphIndex %
+ operatorIndex %
+ location.FileLine()));
+ }
+}
+
+#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
+ CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
+
+void CheckTensor(const TfLiteParser::ModelPtr & model,
+ size_t subgraphIndex,
+ size_t tensorIndex,
+ const CheckLocation & location)
+{
+ // not checking model, because I assume CHECK_MODEL already run
+ // and checked that. An assert would do.
+ BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
+
+ // also subgraph index should be checked by CHECK_MODEL so
+ // I only add an assert here
+ BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
+
+ // the tensor index is the only one to check here
+ if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with an invalid tensor index. "
+ "subgraph:%2% tensor:%3% at %4%") %
+ location.m_Function %
+ subgraphIndex %
+ tensorIndex %
+ location.FileLine()));
+ }
+}
+
+#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
+ CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
+
+void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
+ const CheckLocation & location)
+{
+ if (rawPtr == nullptr)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with a null tensor pointer. "
+ "at %2%") %
+ location.m_Function %
+ location.FileLine()));
+
+ }
+}
+
+#define CHECK_TENSOR_PTR(TENSOR_PTR) \
+ CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
+
+void CheckBuffer(const TfLiteParser::ModelPtr & model,
+ size_t bufferIndex,
+ const CheckLocation & location)
+{
+ if (model.get() == nullptr)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with invalid (null) model. "
+ "Possible reason is that the model is not yet loaded and Unpack(ed). "
+ "buffer:%2% at %3%") %
+ location.m_Function %
+ bufferIndex %
+ location.FileLine()));
+ }
+ else if (bufferIndex >= model->buffers.size())
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("%1% was called with an invalid buffer index. "
+ "buffer index:%2% at %3%") %
+ location.m_Function %
+ bufferIndex %
+ location.FileLine()));
+ }
+ else if (model->buffers[bufferIndex].get() == nullptr)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("The buffer #%1% is null. %3%") %
+ bufferIndex %
+ location.AsString()));
+ }
+}
+
+#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
+ CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
+
+void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
+ const armnn::TensorInfo & tensorInfo,
+ uint32_t bufferId,
+ const CheckLocation & location)
+{
+ if (bufferPtr == nullptr)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("BufferPtr is null for buffer:%1%. %2%") %
+ bufferId %
+ location.AsString()));
+ }
+ else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
+ tensorInfo.GetNumBytes() > bufferPtr->data.size())
+ {
+ std::stringstream ss;
+ ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
+ << "For tensor: " << tensorInfo.GetShape()
+ << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
+ << tensorInfo.GetNumElements() << " elements. " << location.AsString();
+ throw ParseException(ss.str());
+ }
+}
+
+#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
+ CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
+
+bool IsActivationSupported(tflite::ActivationFunctionType activationType)
+{
+ switch(activationType)
+ {
+ case tflite::ActivationFunctionType_NONE:
+ case tflite::ActivationFunctionType_RELU:
+ case tflite::ActivationFunctionType_RELU6:
+ case tflite::ActivationFunctionType_TANH:
+ {
+ return true;
+ }
+ default:
+ {
+ return false;
+ }
+ }
+}
+
// Throws ParseException if OPTION carries a fused activation that
// IsActivationSupported() rejects. (Fixes the misspelling "suppport" in the
// user-visible error message.)
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
+
+
+std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
+{
+ std::vector<unsigned int> result;
+ result.reserve(in.size());
+ for (auto & i : in)
+ {
+ result.push_back(CHECKED_NON_NEGATIVE(i));
+ }
+ return result;
+}
+
+void CalcPadding(uint32_t inputSize,
+ uint32_t filterSize,
+ uint32_t stride,
+ uint32_t& paddingFront,
+ uint32_t& paddingBack,
+ tflite::Padding padding)
+{
+ paddingFront = 0;
+ paddingBack = 0;
+ if (padding == tflite::Padding_SAME)
+ {
+ uint32_t outputSize = (inputSize + stride - 1) / stride;
+ uint32_t temp = (outputSize - 1) * stride + filterSize;
+ if (temp > inputSize)
+ {
+ paddingFront = (temp - inputSize) / 2;
+ paddingBack = (temp - inputSize) - paddingFront;
+ }
+ }
+}
+
/// Converts a flatbuffer tensor description into an armnn::TensorInfo.
/// Maps the element type (only UINT8, FLOAT32 and INT32 are supported),
/// copies the per-tensor quantization scale/offset when present, and
/// converts the shape to unsigned dimensions.
/// Throws ParseException on a null tensor or an unsupported element type.
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        // only per-tensor quantization is supported: zero or one scale and
        // zero or one zero-point
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32
            // but this is what we support at the moment in ArmNN
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
                             dimensions.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
+
/// Copies a constant tensor's raw flatbuffer data into freshly allocated
/// storage and wraps it in an armnn::ConstTensor.
/// @param bufferPtr flatbuffer buffer backing the tensor; must be non-null.
/// @param tensorPtr flatbuffer tensor description; must be non-null.
/// @param tensorInfo in-out: when convertFromTfToArmnnFormat is true this is
///        replaced in place by the NHWC->ArmNN permuted info.
/// @param convertFromTfToArmnnFormat when true the elements are permuted
///        from NHWC ordering into ArmNN ordering while being copied;
///        otherwise the bytes are copied verbatim.
/// @return the ConstTensor together with the unique_ptr that owns its data —
///         the caller must keep the unique_ptr alive for as long as the
///         ConstTensor is in use.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo & tensorInfo,
                      bool convertFromTfToArmnnFormat)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (convertFromTfToArmnnFormat)
    {
        // permute the shape first so the element copy below writes into the
        // already-permuted layout
        tensorInfo = armnnUtils::Permuted(tensorInfo, NHWCToArmNN);
        armnnUtils::Permute(tensorInfo.GetShape(),
                            NHWCToArmNN,
                            reinterpret_cast<const T *>(bufferPtr->data.data()),
                            data.get());
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }
    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
+
/// Inserts a permute (swizzle) layer in front of the given input slot of
/// 'layer' so that NHWC-ordered data is converted to ArmNN ordering before
/// it reaches the layer.
/// @return the newly created swizzle layer; callers should connect the
///         original producer to the swizzle layer's input instead of 'layer'.
IConnectableLayer* SwizzleIn(INetwork& network,
                             IConnectableLayer* layer,
                             unsigned int inputSlotIndex,
                             const TensorInfo & inputInfo)
{
    BOOST_ASSERT(layer != nullptr);
    // Add swizzle layer
    std::stringstream name;
    name << "swizzle_for-" << layer->GetName() << ":in" << inputSlotIndex;
    IConnectableLayer* const swizzleLayer = network.AddPermuteLayer(NHWCToArmNN, name.str().c_str());
    // Set swizzled output shape
    const TensorInfo swizzleOutInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    swizzleLayer->GetOutputSlot(0).SetTensorInfo(swizzleOutInfo);
    // Connect the swizzle layer to the actual layer
    swizzleLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(inputSlotIndex));

    return swizzleLayer;
}
+
/// Appends a permute (deswizzle) layer after the given output slot of
/// 'layer' so its ArmNN-ordered output is converted back to NHWC.
/// Note the two SetTensorInfo calls: the deswizzle layer emits the original
/// NHWC outputInfo, while 'layer' itself is given the permuted
/// (ArmNN-ordered) shape derived from it.
/// @return the newly created deswizzle layer; downstream consumers should
///         connect to this layer instead of 'layer'.
IConnectableLayer* DeswizzleOut(INetwork& network,
                                IConnectableLayer* layer,
                                unsigned int outputSlotIndex,
                                const TensorInfo & outputInfo)
{
    BOOST_ASSERT(layer != nullptr);
    // Add deswizzle layer
    std::stringstream name;
    name << "deswizzle_for-" << layer->GetName() << ":out" << outputSlotIndex;
    IConnectableLayer* const deswizzleLayer = network.AddPermuteLayer(ArmNNToNHWC, name.str().c_str());
    // Set deswizzled output shape
    deswizzleLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    // Set original layer output shape
    const TensorInfo deswizzleOutInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
    layer->GetOutputSlot(outputSlotIndex).SetTensorInfo(deswizzleOutInfo);
    // Connect the actual layer to the deswizzle layer
    layer->GetOutputSlot(outputSlotIndex).Connect(deswizzleLayer->GetInputSlot(0));

    return deswizzleLayer;
}
+
+std::pair<IConnectableLayer*, IConnectableLayer*> SwizzleInDeswizzleOut(INetwork& network,
+ IConnectableLayer* layer,
+ unsigned int inputSlotIndex,
+ const TensorInfo & inputInfo,
+ unsigned int outputSlotIndex,
+ const TensorInfo & outputInfo)
+{
+ IConnectableLayer* const swizzleLayer = SwizzleIn(network, layer, inputSlotIndex, inputInfo);
+ IConnectableLayer* const deswizzleLayer = DeswizzleOut(network, layer, outputSlotIndex, outputInfo);
+ return std::make_pair(swizzleLayer, deswizzleLayer);
+}
+
+armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
+{
+ // generate the binding id by shifting the tensor id by 8 bit
+ // and add the subgraph id, which allows 256 subgraphs
+ return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
+}
+
+} // <anonymous>
+
/// Constructs the parser with an empty network and a dispatch table covering
/// every TfLite builtin opcode. Unregistered opcodes default to
/// ParseUnsupportedOperator, which throws a descriptive ParseException.
TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]   =  &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]           =  &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] =  &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           =  &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
}
+
/// Discards all state from a previous parse (network under construction,
/// unpacked model and recorded tensor connections) so the parser instance
/// can be reused for another model.
void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}
+
+INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
+{
+ ResetParser();
+ m_Model = LoadModelFromFile(graphFile);
+ return CreateNetworkFromModel();
+}
+
+INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
+{
+ ResetParser();
+ m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
+ return CreateNetworkFromModel();
+}
+
+INetworkPtr TfLiteParser::CreateNetworkFromModel()
+{
+ m_Network = INetwork::Create();
+ BOOST_ASSERT(m_Model.get() != nullptr);
+
+ bool failedToCreate = false;
+ std::stringstream errors;
+
+ if (m_Model->subgraphs.size() != 1)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
+ m_Model->subgraphs.size() %
+ CHECK_LOCATION().AsString()));
+ }
+
+ size_t subgraphIndex = 0;
+ for (SubGraphPtr const & subgraph : m_Model->subgraphs)
+ {
+ m_SubgraphConnections.emplace_back(subgraph->tensors.size());
+
+ size_t operatorIndex = 0;
+ for (OperatorPtr const & op : subgraph->operators)
+ {
+ try
+ {
+ if (op->custom_options.size() > 0)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("Custom options for op: %1% is not supported. "
+ "It has %2% bytes of custom options. %3%") %
+ op->opcode_index %
+ op->custom_options.size() %
+ CHECK_LOCATION().AsString()));
+ }
+
+ auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
+ auto builtinCode = opCodePtr->builtin_code;
+
+ if (builtinCode > tflite::BuiltinOperator_MAX)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("Operator code %1% is out of range 0-%2%. "
+ "subgraph:%3% operator idx:%4%. %5%") %
+ builtinCode %
+ tflite::BuiltinOperator_MAX %
+ subgraphIndex %
+ operatorIndex %
+ CHECK_LOCATION().AsString()));
+ }
+
+ // lookup and call the parser function
+ auto & parserFunction = m_ParserFunctions[builtinCode];
+ (this->*parserFunction)(subgraphIndex, operatorIndex);
+ }
+ catch (const ParseException& e)
+ {
+ failedToCreate = true;
+ std::stringstream errorString;
+
+ errorString << "Failed to parse operator #" << operatorIndex
+ << " within subgraph #" << subgraphIndex
+ << " error: " << e.what();
+ BOOST_LOG_TRIVIAL(error) << errorString.str();
+
+ errors << errorString.str() << "\n";
+ }
+ ++operatorIndex;
+ }
+
+ SetupInputLayers(subgraphIndex);
+ SetupOutputLayers(subgraphIndex);
+
+ ++subgraphIndex;
+ }
+
+ if (failedToCreate)
+ {
+ // we can skip everything and let the outer exception handler deal with the error
+ throw ParseException(errors.str());
+ }
+
+ // establish the connections from the layer outputs to the inputs of the subsequent layers
+ for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
+ {
+ for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
+ {
+ if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
+ {
+ for (size_t inputSlotIdx = 0;
+ inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
+ ++inputSlotIdx)
+ {
+ m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
+ *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
+ }
+ }
+ }
+ }
+
+ return std::move(m_Network);
+}
+
/// Records 'slot' as the single producer (output slot) of the given tensor.
/// The actual Connect() calls happen later, in CreateNetworkFromModel, once
/// every layer exists. Throws ParseException if a producer has already been
/// registered for this tensor, since a tensor can only have one.
void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
                boost::format("Another layer has already registered itself as the producer of "
                              "subgraph:%1% tensor:%2% %3%") %
                              subgraphIndex %
                              tensorIndex %
                              CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}
+
+void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
+ size_t tensorIndex,
+ armnn::IInputSlot* slot)
+{
+ CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
+ BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
+ BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
+
+ TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
+ tensorSlots.inputSlots.push_back(slot);
+}
+
/// Default entry in the m_ParserFunctions dispatch table: always throws a
/// ParseException naming the unsupported operator (by index, opcode number
/// and opcode name).
void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    // look up the opcode so the error message can name the operator
    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
                          subgraphIndex %
                          operatorIndex %
                          opcodeIndex %
                          opcode %
                          tflite::EnumNameBuiltinOperator(opcode) %
                          CHECK_LOCATION().AsString()));
}
+
/// Parses an AVERAGE_POOL_2D operator into an ArmNN Pooling2d layer,
/// wrapped with permute layers (input NHWC -> ArmNN ordering and back) and
/// an optional fused-activation layer after the trailing permute.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Pooling2dDescriptor desc;

    desc.m_PoolType = PoolingAlgorithm::Average;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    // add permute layers to swizzle the input and deswizzle the output
    std::pair<IConnectableLayer*, IConnectableLayer*> permuteLayers =
        SwizzleInDeswizzleOut(*m_Network, layer, 0, inputTensorInfo, 0, outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, permuteLayers.first, {inputTensorIndexes[0]});

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    // because the activation function is element-wise, so it is OK to have the activation after the trailing
    // swizzle layer
    layer = AddActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+
/// Parses a CONV_2D operator into an ArmNN Convolution2d layer.
/// Inputs: [data, weights] or [data, weights, bias]. The weights are
/// converted to a ConstTensor (permuted from TfLite's OHWI layout), the
/// layer is wrapped with input/output permute layers, and a fused-activation
/// layer is appended after the trailing permute when requested.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // weights need the NHWC->ArmNN permute; biases (below) do not
    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, true);
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2], biasTensorInfo, false);
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    // add permute layers to swizzle the input and deswizzle the output
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    std::pair<IConnectableLayer*, IConnectableLayer*> permuteLayers =
        SwizzleInDeswizzleOut(*m_Network, layer, 0, inputTensorInfo, 0, outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, permuteLayers.first, {inputTensorIndexes[0]});

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    // because the activation function is element-wise, so it is OK to have the activation after the trailing
    // swizzle layer
    layer = AddActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+
/// Parses a DEPTHWISE_CONV_2D operator into an ArmNN
/// DepthwiseConvolution2d layer. Only a depth (channel) multiplier of 1 is
/// accepted. Otherwise the structure mirrors ParseConv2D: optional bias,
/// permute wrappers, then the fused-activation layer.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
    // assuming the filter is OHWI : Output, H, W, Input
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // weights need the NHWC->ArmNN permute; biases (below) do not
    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, true);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2], biasTensorInfo, false);
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    // add permute layers to swizzle the input and deswizzle the output
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    std::pair<IConnectableLayer*, IConnectableLayer*> permuteLayers =
        SwizzleInDeswizzleOut(*m_Network, layer, 0, inputTensorInfo, 0, outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, permuteLayers.first, {inputTensorIndexes[0]});

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    // because the activation function is element-wise, so it is OK to have the activation after the trailing
    // swizzle layer
    layer = AddActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+
+// Converts a TFLite SOFTMAX operator into an armnn softmax layer.
+// The operator must have exactly one input and one output tensor.
+void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
+
+    SoftmaxDescriptor desc;
+    // beta is the exponent scaling factor carried over from the TfLite options
+    desc.m_Beta = options->beta;
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    // register the input connection slots for the layer, connections are made after all layers have been created
+    // only the tensors for the inputs are relevant, exclude the const tensors
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    // register the output connection slots for the layer, connections are made after all layers have been created
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
+// Computes the output TensorInfo of a SQUEEZE operation: removes the dimensions
+// listed in squeezeDimsIn from the input shape (all size-1 dimensions when the
+// list is empty), while preserving the data type and quantization parameters.
+// Throws ParseException if the input, or the resulting output, has more than
+// 4 dimensions.
+armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
+                                                     const armnn::TensorInfo & inputTensorInfo)
+{
+    CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
+    std::vector<uint32_t> squeezeDims = squeezeDimsIn;
+    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    if (inputTensorInfo.GetNumDimensions() > 4)
+    {
+        std::stringstream ss;
+        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
+           << " shape:" << inputTensorInfo.GetShape() << " "
+           << CHECK_LOCATION().AsString();
+        throw ParseException(ss.str());
+    }
+
+    // an empty squeeze list means "squeeze every dimension that has size 1"
+    if (squeezeDims.empty())
+    {
+        squeezeDims.assign(dimensionSequence,
+                           dimensionSequence+inputTensorInfo.GetNumDimensions());
+    }
+
+    // keep a dimension if it is not listed for squeezing, or if its size is not 1
+    std::vector<uint32_t> outputDims;
+    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
+    {
+        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
+        auto currentDimension = inputTensorInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
+
+    if (outputDims.size() > 4)
+    {
+        std::stringstream ss;
+        // report the computed output rank (previously this printed the input rank by mistake)
+        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
+           << " input shape:" << inputTensorInfo.GetShape() << " "
+           << CHECK_LOCATION().AsString();
+        throw ParseException(ss.str());
+    }
+
+    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
+                                       outputDims.data());
+
+    // we need to preserve the tensor type and the quantization data as well
+    TensorInfo outTensorInfo = inputTensorInfo;
+    outTensorInfo.SetShape(outShape);
+
+    return outTensorInfo;
+}
+
+// Converts a TFLite SQUEEZE operator into an armnn reshape layer, since a
+// squeeze is just a shape change that drops size-1 dimensions.
+void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
+
+    // work out the post-squeeze shape from the input shape and the squeeze dims
+    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo outputTensorInfo =
+        TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
+                                           inputTensorInfo);
+
+    ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
+
+    auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    // register the input/output connection slots; connections are made after all layers have been created
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
+// Appends an activation layer (described by a TfLite fused activation type)
+// to the given output slot of prevLayer. Returns the new activation layer,
+// or prevLayer unchanged when the activation is NONE. Throws ParseException
+// for activation types the parser does not support.
+armnn::IConnectableLayer* TfLiteParser::AddActivationLayer(armnn::IConnectableLayer* prevLayer,
+                                                           unsigned int outputSlot,
+                                                           tflite::ActivationFunctionType activationType)
+{
+    ActivationDescriptor activationDesc;
+    std::string layerName = prevLayer->GetName();
+
+    switch(activationType)
+    {
+        case tflite::ActivationFunctionType_NONE:
+        {
+            // this is a no-op: return previous layer
+            return prevLayer;
+        }
+        case tflite::ActivationFunctionType_RELU:
+        {
+            activationDesc.m_Function = ActivationFunction::ReLu;
+            layerName += ":RELU";
+            break;
+        }
+        case tflite::ActivationFunctionType_RELU6:
+        {
+            // RELU6 clamps the output to the range [0, 6]
+            activationDesc.m_Function = ActivationFunction::BoundedReLu;
+            activationDesc.m_A = 6.0f;
+            activationDesc.m_B = 0.0f;
+            layerName += ":RELU6";
+            break;
+        }
+        case tflite::ActivationFunctionType_TANH:
+        {
+            activationDesc.m_Function = ActivationFunction::TanH;
+            activationDesc.m_A = 1.0f;
+            activationDesc.m_B = 1.0f;
+            layerName += ":TANH";
+            break;
+        }
+
+        // I only put these here as a reminder what others we could support
+        case tflite::ActivationFunctionType_RELU_N1_TO_1:
+        case tflite::ActivationFunctionType_SIGN_BIT:
+        default:
+        {
+            // fixed typo in the user-facing message ("suppport" -> "support")
+            throw ParseException(
+                boost::str(
+                    boost::format("TfLite parser doesn't support fused activation: "
+                                  "%1%/%2% %3% ") %
+                                  activationType %
+                                  tflite::EnumNameActivationFunctionType(activationType) %
+                                  CHECK_LOCATION().AsString()));
+
+        }
+    }
+
+    IConnectableLayer* activationLayer =
+        m_Network->AddActivationLayer(activationDesc, layerName.c_str());
+
+    // splice the activation in after the requested output slot of the previous layer
+    auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
+    prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
+    activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
+    return activationLayer;
+}
+
+// Reads a flatbuffers file from disk and unpacks it into a tflite::ModelT.
+// Throws InvalidArgumentException on a null file name and FileNotFoundException
+// when the path does not exist; parsing errors surface from LoadModelFromBinary.
+TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
+{
+    if (fileName == nullptr)
+    {
+        throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
+                                       CHECK_LOCATION().AsString()));
+    }
+    boost::system::error_code errorCode;
+    boost::filesystem::path pathToFile(fileName);
+    if (!boost::filesystem::exists(pathToFile, errorCode))
+    {
+        throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
+                                    fileName %
+                                    errorCode %
+                                    CHECK_LOCATION().AsString()));
+    }
+    // slurp the whole file into memory and hand it to the binary loader
+    std::ifstream file(fileName, std::ios::binary);
+    std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
+    return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
+                               fileContent.size());
+}
+
+// Verifies that the given buffer is a well-formed TfLite flatbuffer and unpacks
+// it into a tflite::ModelT object tree. Throws InvalidArgumentException on a
+// null buffer and ParseException when verification fails.
+TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
+{
+    if (binaryContent == nullptr)
+    {
+        throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
+                                       CHECK_LOCATION().AsString()));
+    }
+    // run the flatbuffers structural verifier before unpacking to avoid UB on malformed input
+    flatbuffers::Verifier verifier(binaryContent, len);
+    if (verifier.VerifyBuffer<tflite::Model>() == false)
+    {
+        throw ParseException(
+            boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
+                                     "flatbuffers format. size:%1% %2%") %
+                       len %
+                       CHECK_LOCATION().AsString()));
+    }
+    return tflite::UnPackModel(binaryContent);
+}
+
+// Returns raw tensor pointers for all input tensors of the given operator.
+// Throws if the model/subgraph/operator indices or any tensor index is invalid.
+TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
+                                                         size_t subgraphIndex,
+                                                         size_t operatorIndex)
+{
+    CHECK_MODEL(model, subgraphIndex, operatorIndex);
+
+    const auto & subGraphPtr = model->subgraphs[subgraphIndex];
+    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
+
+    size_t inputCount = operatorPtr->inputs.size();
+    TensorRawPtrVector result(inputCount);
+    for (size_t i=0; i<inputCount; ++i)
+    {
+        uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
+        // validate the tensor index before dereferencing, mirroring GetOutputs();
+        // previously an out-of-range index would index past the tensors vector
+        CHECK_TENSOR(model, subgraphIndex, inputId);
+        result[i] = subGraphPtr->tensors[inputId].get();
+    }
+    return result;
+}
+
+// Returns raw tensor pointers for all output tensors of the given operator.
+// Throws if the model/subgraph/operator indices or any tensor index is invalid.
+TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
+                                                          size_t subgraphIndex,
+                                                          size_t operatorIndex)
+{
+    CHECK_MODEL(model, subgraphIndex, operatorIndex);
+
+    const auto & subGraphPtr = model->subgraphs[subgraphIndex];
+    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
+
+    size_t outputCount = operatorPtr->outputs.size();
+    TensorRawPtrVector result(outputCount);
+    for (size_t i=0; i<outputCount; ++i)
+    {
+        uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
+        // validate the tensor index before dereferencing the tensors vector
+        CHECK_TENSOR(model, subgraphIndex, outputId);
+        result[i] = subGraphPtr->tensors[outputId].get();
+    }
+    return result;
+}
+
+// Returns (tensor index, raw tensor pointer) pairs for every input tensor of
+// the given subgraph. Throws if the subgraph or any tensor index is invalid.
+TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
+                                                                   size_t subgraphIndex)
+{
+    CHECK_SUBGRAPH(model, subgraphIndex);
+    const auto & subGraphPtr = model->subgraphs[subgraphIndex];
+
+    size_t inputCount = subGraphPtr->inputs.size();
+    TensorIdRawPtrVector result(inputCount);
+    for (size_t i=0; i<inputCount; ++i)
+    {
+        uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
+        CHECK_TENSOR(model, subgraphIndex, inputId);
+        result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
+    }
+    return result;
+}
+
+// Returns (tensor index, raw tensor pointer) pairs for every output tensor of
+// the given subgraph. Throws if the subgraph or any tensor index is invalid.
+TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
+                                                                    size_t subgraphIndex)
+{
+    CHECK_SUBGRAPH(model, subgraphIndex);
+    const auto & subGraphPtr = model->subgraphs[subgraphIndex];
+
+    size_t outputCount = subGraphPtr->outputs.size();
+    TensorIdRawPtrVector result(outputCount);
+    for (size_t i=0; i<outputCount; ++i)
+    {
+        uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
+        // validate the tensor index before dereferencing, mirroring GetSubgraphInputs();
+        // previously an out-of-range index would index past the tensors vector
+        CHECK_TENSOR(model, subgraphIndex, outputId);
+        result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
+    }
+    return result;
+}
+
+// Returns a reference to the raw list of input tensor ids for the given
+// operator, as stored in the flatbuffer model (may include const tensor ids).
+std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
+                                                      size_t subgraphIndex,
+                                                      size_t operatorIndex)
+{
+    CHECK_MODEL(model, subgraphIndex, operatorIndex);
+    const auto & subGraphPtr = model->subgraphs[subgraphIndex];
+    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
+    return operatorPtr->inputs;
+}
+
+// Returns a reference to the raw list of output tensor ids for the given
+// operator, as stored in the flatbuffer model.
+std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
+                                                       size_t subgraphIndex,
+                                                       size_t operatorIndex)
+{
+    CHECK_MODEL(model, subgraphIndex, operatorIndex);
+    const auto & subGraphPtr = model->subgraphs[subgraphIndex];
+    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
+    return operatorPtr->outputs;
+}
+
+// Records, for each given tensor index, that the corresponding input slot of
+// 'layer' consumes that tensor. Actual connections are made later, once every
+// layer exists. tensorIndexes must match the layer's input slot count exactly.
+void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
+                                      size_t operatorIndex,
+                                      IConnectableLayer* layer,
+                                      const std::vector<unsigned int>& tensorIndexes)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+    BOOST_ASSERT(layer != nullptr);
+    if (tensorIndexes.size() != layer->GetNumInputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
+                                     " for subgraph:%3% operator index:%4% %5%") %
+                       tensorIndexes.size() %
+                       layer->GetNumInputSlots() %
+                       subgraphIndex %
+                       operatorIndex %
+                       CHECK_LOCATION().AsString()));
+    }
+
+    // tensorIndexes[i] feeds input slot i of the layer
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
+    {
+        unsigned int tensorIndex = tensorIndexes[slotIndex];
+        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
+        RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
+    }
+}
+
+// Records, for each given tensor index, that the corresponding output slot of
+// 'layer' produces that tensor. Actual connections are made later, once every
+// layer exists. tensorIndexes must match the layer's output slot count exactly.
+void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
+                                       size_t operatorIndex,
+                                       IConnectableLayer* layer,
+                                       const std::vector<unsigned int>& tensorIndexes)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+    BOOST_ASSERT(layer != nullptr);
+    if (tensorIndexes.size() != layer->GetNumOutputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
+                                     " for subgraph:%3% operator index:%4% %5%") %
+                       tensorIndexes.size() %
+                       layer->GetNumOutputSlots() %
+                       subgraphIndex %
+                       operatorIndex %
+                       CHECK_LOCATION().AsString()));
+    }
+
+    // tensorIndexes[i] is produced by output slot i of the layer
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
+    {
+        unsigned int tensorIndex = tensorIndexes[slotIndex];
+        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
+        RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
+    }
+}
+
+// Creates an armnn input layer for every declared input tensor of the subgraph
+// and registers it as the producer of that tensor.
+void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
+{
+    CHECK_SUBGRAPH(m_Model, subgraphIndex);
+
+    auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
+    for (auto const & tensorIdAndPtr : inputs)
+    {
+        // the binding id encodes subgraph + tensor so callers can look the input up by name later
+        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
+        IConnectableLayer* layer =
+            m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
+
+        auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
+        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+        RegisterOutputSlots(subgraphIndex,
+                            VIRTUAL_OPERATOR_ID,
+                            layer,
+                            { static_cast<uint32_t>(tensorIdAndPtr.first) });
+    }
+}
+
+// Creates an armnn output layer for every declared output tensor of the
+// subgraph and registers it as a consumer of that tensor.
+void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
+{
+    CHECK_SUBGRAPH(m_Model, subgraphIndex);
+
+    auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
+    for (auto const & tensorIdAndPtr : outputs)
+    {
+        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
+        IConnectableLayer* layer =
+            m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
+
+        RegisterInputSlots(subgraphIndex,
+                           VIRTUAL_OPERATOR_ID,
+                           layer,
+                           { static_cast<uint32_t>(tensorIdAndPtr.first) });
+    }
+}
+
+// Returns the raw buffer at the given index, throwing if the index is invalid.
+// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
+TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
+{
+    CHECK_BUFFER(model, bufferIndex);
+    return model->buffers[bufferIndex].get();
+}
+
+// Builds an armnn ConstTensor from a TfLite tensor backed by a model buffer.
+// The returned SupportedDataStorage owns the copied data, so the ConstTensor
+// stays valid only as long as the storage half of the pair is kept alive.
+// convertFromTfToArmnnFormat requests a layout swizzle inside
+// CreateConstTensorImpl (e.g. for weights). Throws ParseException for data
+// types other than Float32, QuantisedAsymm8 and Signed32.
+std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
+TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
+                                armnn::TensorInfo & tensorInfo,
+                                bool convertFromTfToArmnnFormat)
+{
+    CHECK_TENSOR_PTR(tensorPtr);
+    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
+    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
+
+    // dispatch on the element type; each branch copies the buffer into typed
+    // storage and wraps it in the single SupportedDataStorage holder type
+    switch (tensorInfo.GetDataType())
+    {
+        case armnn::DataType::Float32:
+        {
+            auto constData = CreateConstTensorImpl<float>(bufferPtr,
+                                                          tensorPtr,
+                                                          tensorInfo,
+                                                          convertFromTfToArmnnFormat);
+            SupportedDataStorage storage(std::move(constData.second));
+            return std::make_pair(constData.first, std::move(storage));
+        }
+        case armnn::DataType::QuantisedAsymm8:
+        {
+            auto constData = CreateConstTensorImpl<uint8_t>(bufferPtr,
+                                                            tensorPtr,
+                                                            tensorInfo,
+                                                            convertFromTfToArmnnFormat);
+            SupportedDataStorage storage(std::move(constData.second));
+            return std::make_pair(constData.first, std::move(storage));
+        }
+        case armnn::DataType::Signed32:
+        {
+            auto constData = CreateConstTensorImpl<int32_t>(bufferPtr,
+                                                            tensorPtr,
+                                                            tensorInfo,
+                                                            convertFromTfToArmnnFormat);
+            SupportedDataStorage storage(std::move(constData.second));
+            return std::make_pair(constData.first, std::move(storage));
+        }
+        default:
+        {
+            std::stringstream errString;
+            errString << "Unexpected datatype when creating const tensor: "
+                      << armnn::GetDataTypeName(tensorInfo.GetDataType())
+                      << " shape:" << tensorInfo.GetShape()
+                      << CHECK_LOCATION().AsString();
+            throw ParseException(errString.str());
+        }
+    }
+}
+
+// Looks up a network input by tensor name within the given subgraph and returns
+// its (binding id, TensorInfo) pair. Throws ParseException listing all valid
+// input names when the name is not found.
+BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
+                                                          const std::string& name) const
+{
+    CHECK_SUBGRAPH(m_Model, subgraphId);
+    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
+    for (auto const & input : inputs)
+    {
+        if (input.second->name == name)
+        {
+            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
+            return std::make_pair(bindingId, ToTensorInfo(input.second));
+        }
+    }
+
+    // not found: build a helpful message listing all available input names
+    std::stringstream bindings;
+    for (auto const & input : inputs)
+    {
+        bindings << "'" << input.second->name << "' ";
+    }
+
+    throw ParseException(
+        boost::str(
+            boost::format("No input binding found for subgraph:%1% and name:%2%. "
+                          "Possible inputs are: [%3%] %4%") %
+            subgraphId %
+            name %
+            bindings.str() %
+            CHECK_LOCATION().AsString()));
+}
+
+// Looks up a network output by tensor name within the given subgraph and
+// returns its (binding id, TensorInfo) pair. Throws ParseException listing all
+// valid output names when the name is not found.
+BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
+                                                           const std::string& name) const
+{
+    CHECK_SUBGRAPH(m_Model, subgraphId);
+    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
+    for (auto const & output : outputs)
+    {
+        if (output.second->name == name)
+        {
+            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
+            return std::make_pair(bindingId, ToTensorInfo(output.second));
+        }
+    }
+
+    // not found: build a helpful message listing all available output names
+    std::stringstream bindings;
+    for (auto const & output : outputs)
+    {
+        bindings << "'" << output.second->name << "' ";
+    }
+
+    throw ParseException(
+        boost::str(
+            boost::format("No output binding found for subgraph:%1% and name:%2%. "
+                          "Possible outputs are: [%3%] %4%") %
+            subgraphId %
+            name %
+            bindings.str() %
+            CHECK_LOCATION().AsString()));
+}
+
+// Returns the number of subgraphs in the loaded model.
+size_t TfLiteParser::GetSubgraphCount() const
+{
+    return m_Model->subgraphs.size();
+}
+
+// Returns the names of all input tensors of the given subgraph, in declaration order.
+std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
+{
+    CHECK_SUBGRAPH(m_Model, subgraphId);
+    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
+    std::vector<std::string> result;
+    result.reserve(inputs.size());
+    for (auto const & input : inputs)
+    {
+        result.push_back(input.second->name);
+    }
+    return result;
+}
+
+// Returns the names of all output tensors of the given subgraph, in declaration order.
+std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
+{
+    CHECK_SUBGRAPH(m_Model, subgraphId);
+    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
+    std::vector<std::string> result;
+    result.reserve(outputs.size());
+    for (auto const & output : outputs)
+    {
+        result.push_back(output.second->name);
+    }
+    return result;
+}
+
+// Factory returning a raw parser pointer; the caller owns it and must call Destroy().
+ITfLiteParser* ITfLiteParser::CreateRaw()
+{
+    return new TfLiteParser();
+}
+
+// Factory returning a smart pointer that destroys the parser via ITfLiteParser::Destroy.
+ITfLiteParserPtr ITfLiteParser::Create()
+{
+    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
+}
+
+// Deletes a parser previously obtained from CreateRaw()/Create().
+void ITfLiteParser::Destroy(ITfLiteParser* parser)
+{
+    delete parser;
+}
+
+// Takes ownership of float const-tensor data; the other storage slots stay empty.
+TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
+: m_FloatData(std::move(data))
+, m_Uint8Data(nullptr)
+, m_Int32Data(nullptr)
+{
+}
+
+// Takes ownership of uint8 const-tensor data; the other storage slots stay empty.
+TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
+: m_FloatData(nullptr)
+, m_Uint8Data(std::move(data))
+, m_Int32Data(nullptr)
+{
+}
+
+// Takes ownership of int32 const-tensor data; the other storage slots stay empty.
+TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
+: m_FloatData(nullptr)
+, m_Uint8Data(nullptr)
+, m_Int32Data(std::move(data))
+{
+}
+
+} // armnnTfLiteParser
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
new file mode 100644
index 0000000000..91585af5d0
--- /dev/null
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -0,0 +1,156 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "armnn/INetwork.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+#include <schema_generated.h>
+#include <functional>
+#include <vector>
+
+namespace armnnTfLiteParser
+{
+
+/// Concrete implementation of ITfLiteParser: loads a TensorFlow Lite
+/// flatbuffers model and builds the equivalent armnn::INetwork from it.
+class TfLiteParser : public ITfLiteParser
+{
+public:
+    // Shorthands for TfLite types
+    using ModelPtr = std::unique_ptr<tflite::ModelT>;
+    using SubGraphPtr = std::unique_ptr<tflite::SubGraphT>;
+    using OperatorPtr = std::unique_ptr<tflite::OperatorT>;
+    using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>;
+    using TensorPtr = std::unique_ptr<tflite::TensorT>;
+    using TensorRawPtr = const tflite::TensorT *;
+    using TensorRawPtrVector = std::vector<TensorRawPtr>;
+    using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>;
+    using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>;
+    using BufferPtr = std::unique_ptr<tflite::BufferT>;
+    using BufferRawPtr = const tflite::BufferT *;
+
+public:
+    /// Create the network from a flatbuffers binary file on disk
+    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) override;
+
+    /// Create the network from a flatbuffers binary
+    virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent) override;
+
+
+    /// Retrieve binding info (layer id and tensor info) for the network input identified by
+    /// the given layer name and subgraph id
+    virtual BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId,
+                                                        const std::string& name) const override;
+
+    /// Retrieve binding info (layer id and tensor info) for the network output identified by
+    /// the given layer name and subgraph id
+    virtual BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId,
+                                                         const std::string& name) const override;
+
+    /// Return the number of subgraphs in the parsed model
+    virtual size_t GetSubgraphCount() const override;
+
+    /// Return the input tensor names for a given subgraph
+    virtual std::vector<std::string> GetSubgraphInputTensorNames(size_t subgraphId) const override;
+
+    /// Return the output tensor names for a given subgraph
+    virtual std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const override;
+
+    TfLiteParser();
+    virtual ~TfLiteParser() {}
+
+public:
+    // testable helpers
+    static ModelPtr LoadModelFromFile(const char * fileName);
+    static ModelPtr LoadModelFromBinary(const uint8_t * binaryContent, size_t len);
+    static TensorRawPtrVector GetInputs(const ModelPtr & model, size_t subgraphIndex, size_t operatorIndex);
+    static TensorRawPtrVector GetOutputs(const ModelPtr & model, size_t subgraphIndex, size_t operatorIndex);
+    static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr & model, size_t subgraphIndex);
+    static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr & model, size_t subgraphIndex);
+    static std::vector<int32_t>& GetInputTensorIds(const ModelPtr& model, size_t subgraphIndex, size_t operatorIndex);
+    static std::vector<int32_t>& GetOutputTensorIds(const ModelPtr& model, size_t subgraphIndex, size_t operatorIndex);
+
+    static BufferRawPtr GetBuffer(const ModelPtr& model, size_t bufferIndex);
+    static armnn::TensorInfo OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDims,
+                                                  const armnn::TensorInfo & inputTensorInfo);
+
+
+private:
+    // No copying allowed until it is wanted and properly implemented
+    TfLiteParser(const TfLiteParser &) = delete;
+    TfLiteParser & operator=(const TfLiteParser &) = delete;
+
+    /// Create the network from an already loaded flatbuffers model
+    armnn::INetworkPtr CreateNetworkFromModel();
+
+    // signature for the parser functions
+    using OperatorParsingFunction = void(TfLiteParser::*)(size_t subgraphIndex, size_t operatorIndex);
+
+    void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
+    void ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex);
+    void ParseConv2D(size_t subgraphIndex, size_t operatorIndex);
+    void ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex);
+    void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
+    void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
+
+    void RegisterProducerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IOutputSlot* slot);
+    void RegisterConsumerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IInputSlot* slot);
+    void RegisterInputSlots(size_t subgraphIndex,
+                            size_t operatorIndex,
+                            armnn::IConnectableLayer* layer,
+                            const std::vector<unsigned int>& tensorIndexes);
+    void RegisterOutputSlots(size_t subgraphIndex,
+                             size_t operatorIndex,
+                             armnn::IConnectableLayer* layer,
+                             const std::vector<unsigned int>& tensorIndexes);
+
+    void SetupInputLayers(size_t subgraphIndex);
+    void SetupOutputLayers(size_t subgraphIndex);
+
+    void ResetParser();
+
+    /// Attach an activation layer to the one passed as a parameter
+    armnn::IConnectableLayer* AddActivationLayer(armnn::IConnectableLayer* layer,
+                                                 unsigned int outputSlot,
+                                                 tflite::ActivationFunctionType activationType);
+
+    // SupportedDataStorage's purpose is to hold data till we pass over to the network.
+    // We don't care about the content, and we want a single datatype to simplify the code.
+    struct SupportedDataStorage
+    {
+        std::unique_ptr<float[]>   m_FloatData;
+        std::unique_ptr<uint8_t[]> m_Uint8Data;
+        std::unique_ptr<int32_t[]> m_Int32Data;
+
+        SupportedDataStorage(std::unique_ptr<float[]> && data);
+        SupportedDataStorage(std::unique_ptr<uint8_t[]> && data);
+        SupportedDataStorage(std::unique_ptr<int32_t[]> && data);
+    };
+
+    std::pair<armnn::ConstTensor, SupportedDataStorage> CreateConstTensor(TensorRawPtr tensorPtr,
+                                                                          armnn::TensorInfo & tensorInfo,
+                                                                          bool convertFromTfToArmnnFormat);
+
+    /// The network we're building. Gets cleared after it is passed to the user
+    armnn::INetworkPtr m_Network;
+    std::vector<OperatorParsingFunction> m_ParserFunctions;
+    ModelPtr m_Model;
+
+    /// A mapping of an output slot to each of the input slots it should be connected to
+    /// The outputSlot is from the layer that creates this tensor as one of its outputs
+    /// The inputSlots are from the layers that use this tensor as one of their inputs
+    struct TensorSlots
+    {
+        armnn::IOutputSlot* outputSlot;
+        std::vector<armnn::IInputSlot*> inputSlots;
+
+        TensorSlots() : outputSlot(nullptr) { }
+    };
+    typedef std::vector<TensorSlots> TensorConnections;
+    /// Connections for tensors in each subgraph
+    /// The first index is the subgraph ID, the second index is the tensor ID
+    std::vector<TensorConnections> m_SubgraphConnections;
+};
+
+}
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
new file mode 100644
index 0000000000..ba6d2ae40a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include "ParserFlatbuffersFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+// Builds a minimal TfLite model description (JSON, converted to a flatbuffer by
+// the base fixture) containing a single AVERAGE_POOL_2D operator with a 2x2
+// filter and stride 2, parameterised on input/output shape and tensor data type.
+struct AvgPool2DFixture : public ParserFlatbuffersFixture
+{
+    explicit AvgPool2DFixture(std::string inputdim, std::string outputdim, std::string dataType)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" } ],
+                "subgraphs": [
+                {
+                    "tensors": [
+                    {
+                        "shape": )"
+                        + outputdim
+                        + R"(,
+                        "type": )"
+                        + dataType
+                        + R"(,
+                        "buffer": 0,
+                        "name": "OutputTensor",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ 1.0 ],
+                            "zero_point": [ 0 ]
+                        }
+                    },
+                    {
+                        "shape": )"
+                        + inputdim
+                        + R"(,
+                        "type": )"
+                        + dataType
+                        + R"(,
+                        "buffer": 1,
+                        "name": "InputTensor",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ 1.0 ],
+                            "zero_point": [ 0 ]
+                        }
+                    }
+                    ],
+                    "inputs": [ 1 ],
+                    "outputs": [ 0 ],
+                    "operators": [ {
+                        "opcode_index": 0,
+                        "inputs": [ 1 ],
+                        "outputs": [ 0 ],
+                        "builtin_options_type": "Pool2DOptions",
+                        "builtin_options":
+                        {
+                            "padding": "VALID",
+                            "stride_w": 2,
+                            "stride_h": 2,
+                            "filter_width": 2,
+                            "filter_height": 2,
+                            "fused_activation_function": "NONE"
+                        },
+                        "custom_options_format": "FLEXBUFFERS"
+                    } ]
+                }
+                ],
+                "description": "AvgPool2D test.",
+                "buffers" : [ {}, {} ]
+            })";
+
+        SetupSingleInputSingleOutput("InputTensor", "OutputTensor");
+    }
+};
+
+
+// 2x2 uint8 input pooled down to a single element.
+struct AvgPoolLiteFixtureUint1DOutput : AvgPool2DFixture
+{
+    AvgPoolLiteFixtureUint1DOutput() : AvgPool2DFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]", "UINT8") {}
+};
+
+// 2x2 float32 input pooled down to a single element.
+struct AvgPoolLiteFixtureFloat1DOutput : AvgPool2DFixture
+{
+    AvgPoolLiteFixtureFloat1DOutput() : AvgPool2DFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]", "FLOAT32") {}
+};
+
+// 4x4 uint8 input pooled down to a 2x2 output.
+struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture
+{
+    AvgPoolLiteFixture2DOutput() : AvgPool2DFixture("[ 1, 4, 4, 1 ]", "[ 1, 2, 2, 1 ]", "UINT8") {}
+};
+
+// average of {2,3,5,2} is 3
+BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
+{
+    RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 });
+}
+
+// same averaging as the uint8 case, using float32 tensors
+BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
+{
+    RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
+}
+
+// each 2x2 window of the 4x4 input averages to one element of the 2x2 output
+BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
+{
+    RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
+}
+
+// feeding uint8 data into a float32 model must be rejected with an armnn::Exception
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
+{
+    BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
new file mode 100644
index 0000000000..8a17dec47a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -0,0 +1,351 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+#include <sstream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SimpleConv2DFixture : public ParserFlatbuffersFixture
+{
+ explicit SimpleConv2DFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
+{
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9,
+ },
+ // because of the output scaling we need to take half of the values
+ {
+ (1*2 + 2*1 + 3*0 +
+ 4*6 + 5*2 + 6*1 +
+ 7*4 + 8*1 + 9*2) /2
+ });
+}
+
+struct Conv2DWithBiasesFixture : public ParserFlatbuffersFixture
+{
+ explicit Conv2DWithBiasesFixture(const std::string & inputShape,
+ const std::string & outputShape,
+ const std::string & filterShape,
+ const std::string & filterData,
+ const std::string & biasShape,
+ const std::string & biasData,
+ const std::string & strides,
+ const std::string & activation="NONE",
+ const std::string & filterScale="1.0",
+ const std::string & filterZeroPoint="0",
+ const std::string & outputScale="2.0",
+ const std::string & outputZeroPoint="0")
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ )" + outputScale + R"( ],
+ "zero_point": [ )" + outputZeroPoint + R"( ],
+ }
+ },
+ {
+ "shape": )" + filterShape + R"( ,
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + filterScale + R"( ],
+ "zero_point": [ )" + filterZeroPoint + R"( ],
+ }
+ },
+ {
+ "shape": )" + biasShape + R"( ,
+ "type": "INT32",
+ "buffer": 3,
+ "name": "biasTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2, 3 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "SAME",
+ "stride_w": )" + strides + R"(,
+ "stride_h": )" + strides + R"(,
+ "fused_activation_function": )" + activation + R"(
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + filterData + R"(, },
+ { "data": )" + biasData + R"(, },
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
+{
+ SimpleConv2DWithBiasesFixture()
+ : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 2, 2, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 2,1, 0,6 ]", // filterData
+ "[ 1 ]", // biasShape
+ "[ 10, 0, 0, 0 ]", // biasData
+ "1") // stride w and h
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
+{
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2,
+ 3, 4,
+ },
+ // because of the output scaling we need to take half of the values
+ {
+ (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
+ (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
+ (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
+ (4*2 + 0*1 + 0*0 + 0*6 + 10)/2
+ });
+}
+
+struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
+{
+ static std::string GenerateInts(unsigned int n)
+ {
+ std::stringstream ss;
+ ss << " [ ";
+ for( unsigned int i=0; i<n; ++i ) {
+ if (i > 0 )
+ {
+ ss << " , ";
+ }
+ ss << " " << (i%256);
+ }
+ ss << " ] ";
+ return ss.str();
+ }
+
+ Conv2DShapeTestFixture()
+ : Conv2DWithBiasesFixture("[ 1, 224, 224, 3 ]", // inputShape
+ "[ 1, 112, 112, 32 ]", // outputShape
+ "[ 32, 3, 3, 3 ]", // filterShape
+ GenerateInts(32*3*3*3), // filterData
+ "[ 32 ]", // biasShape
+ GenerateInts(32*4), // biasData
+ "2") // stride w and h
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2D_112x112_out, Conv2DShapeTestFixture )
+{
+}
+
+struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
+{
+ ReluConv2DWithBiasesFixture()
+ : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 2, 2, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 2,1, 0,6 ]", // filterData
+ "[ 1 ]", // biasShape
+ "[ 16, 0, 0, 0 ]", // biasData
+ "1", // stride w and h
+ "RELU", // activation
+ "1.0", // filter scale
+ "4", // filter zero point
+ "2.0", // output scale
+ "20") // output zero point
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture )
+{
+ uint8_t bias = 16;
+ uint8_t outZero = 20;
+ uint8_t fz = 4; // filter zero point
+
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2,
+ 4, 8,
+ },
+ // factors to consider:
+ // - the filter zero point is non zero, hence the (x-fz)
+ // - the output scale is 2 hence the /2
+ // - output zero point is non zero, hence the +outZero
+ // - RELU cuts negative values and then we add the output zero point
+ {
+ std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
+ std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
+ std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
+ std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
+ });
+}
+
+struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
+{
+ Relu6Conv2DWithBiasesFixture()
+ : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 2, 2, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 2,1, 0,6 ]", // filterData
+ "[ 1 ]", // biasShape
+ "[ 0, 0, 0, 0 ]", // biasData
+ "1", // stride w and h
+ "RELU6", // activation
+ "1.0", // filter scale
+ "0", // filter zero point
+ "2.0", // output scale
+ "0") // output zero point
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixture )
+{
+ uint8_t relu6Min = 6 / 2; // divide by output scale
+
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2,
+ 4, 1,
+ },
+ // factors to consider:
+ // - the output scale is 2 hence the /2
+ // - RELU6 cuts output values at +6
+ {
+ std::min(relu6Min, static_cast<uint8_t>((1*2 + 2*1 + 4*0 + 1*6)/2)),
+ std::min(relu6Min, static_cast<uint8_t>((2*2 + 0*1 + 1*0 + 0*6)/2)),
+ std::min(relu6Min, static_cast<uint8_t>((4*2 + 1*1 + 0*0 + 0*6)/2)),
+ std::min(relu6Min, static_cast<uint8_t>((1*2 + 0*1 + 0*0 + 0*6)/2))
+ });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
new file mode 100644
index 0000000000..4a06418095
--- /dev/null
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -0,0 +1,199 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct DepthwiseConvolution2dFixture : public ParserFlatbuffersFixture
+{
+ explicit DepthwiseConvolution2dFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& filterShape,
+ const std::string& filterData,
+ const std::string& strides,
+ const std::string& paddingType,
+ const std::string biasShape = "",
+ const std::string biasData = "")
+ {
+ std::string inputTensors = "[ 0, 2 ]";
+ std::string biasTensor = "";
+ std::string biasBuffer = "";
+ if (biasShape.size() > 0 && biasData.size() > 0)
+ {
+ inputTensors = "[ 0, 2, 3 ]";
+ biasTensor = R"(
+ {
+ "shape": )" + biasShape + R"( ,
+ "type": "INT32",
+ "buffer": 3,
+ "name": "biasTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ } )";
+ biasBuffer = R"(
+ { "data": )" + biasData + R"(, }, )";
+ }
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + filterShape + R"(,
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }, )" + biasTensor + R"(
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": )" + inputTensors + R"(,
+ "outputs": [ 1 ],
+ "builtin_options_type": "DepthwiseConv2DOptions",
+ "builtin_options": {
+ "padding": ")" + paddingType + R"(",
+ "stride_w": )" + strides+ R"(,
+ "stride_h": )" + strides+ R"(,
+ "depth_multiplier": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + filterData + R"(, }, )"
+ + biasBuffer + R"(
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dSameFixture()
+ : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
+ "[ 1, 3, 3, 1 ]", // outputShape
+ "[ 1, 3, 3, 1 ]", // filterShape
+ "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
+ "1", // stride w and h
+ "SAME") // padding type
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
+{
+ RunTest<4, uint8_t>(
+ 0,
+ { 0, 1, 2,
+ 3, 4, 5,
+ 6, 7, 8 },
+ // the expected values were generated using the example python implementation at
+ // https://eli.thegreenplace.net/2018/depthwise-separable-convolutions-for-machine-learning/
+ // divide the expected values by the output scale, as it is not 1.0
+ { 14/2, 35/2, 38/2,
+ 57/2, 120/2, 111/2,
+ 110/2, 197/2, 158/2 });
+}
+
+struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dValidFixture ()
+ : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
+ "[ 1, 1, 1, 1 ]", // outputShape
+ "[ 1, 3, 3, 1 ]", // filterShape
+ "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
+ "1", // stride w and h
+ "VALID") // padding type
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
+{
+ RunTest<4, uint8_t>(
+ 0,
+ { 0, 1, 2,
+ 3, 4, 5,
+ 6, 7, 8 },
+ // divide the expected values by the output scale, as it is not 1.0
+ { 120/2 });
+}
+
+struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dSameBiasFixture()
+ : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
+ "[ 1, 3, 3, 1 ]", // outputShape
+ "[ 1, 3, 3, 1 ]", // filterShape
+ "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
+ "1", // stride w and h
+ "SAME", // padding type
+ "[ 1 ]", // biasShape
+ "[ 10, 0, 0, 0 ]") // biasData
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
+{
+ RunTest<4, uint8_t>(
+ 0,
+ { 0, 1, 2,
+ 3, 4, 5,
+ 6, 7, 8 },
+ // divide the expected values by the output scale, as it is not 1.0
+ { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
+ ( 57+10)/2, (120+10)/2, (111+10)/2,
+ (110+10)/2, (197+10)/2, (158+10)/2 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/GetBuffer.cpp b/src/armnnTfLiteParser/test/GetBuffer.cpp
new file mode 100644
index 0000000000..7486f01b52
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetBuffer.cpp
@@ -0,0 +1,126 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+#include <sstream>
+
+using armnnTfLiteParser::TfLiteParser;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetBufferFixture : public ParserFlatbuffersFixture
+{
+ explicit GetBufferFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ }
+ )";
+ ReadStringToBinary();
+ }
+
+ void CheckBufferContents(const TfLiteParser::ModelPtr& model,
+ std::vector<int32_t> bufferValues, size_t bufferIndex)
+ {
+ for(long unsigned int i=0; i<bufferValues.size(); i++)
+ {
+ BOOST_CHECK_EQUAL(TfLiteParser::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
+ }
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
+{
+ //Check contents of buffer are correct
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> bufferValues = {2,1,0,6,2,1,4,1,2};
+ CheckBufferContents(model, bufferValues, 2);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetBufferCheckEmpty, GetBufferFixture)
+{
+ //Check if test fixture buffers are empty or not
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK(TfLiteParser::GetBuffer(model, 0)->data.empty());
+ BOOST_CHECK(TfLiteParser::GetBuffer(model, 1)->data.empty());
+ BOOST_CHECK(!TfLiteParser::GetBuffer(model, 2)->data.empty());
+ BOOST_CHECK(TfLiteParser::GetBuffer(model, 3)->data.empty());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetBufferCheckParseException, GetBufferFixture)
+{
+ //Check if armnn::ParseException thrown when invalid buffer index used
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetBuffer(model, 4)->data.empty(), armnn::Exception);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
new file mode 100644
index 0000000000..2c12c1976a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
@@ -0,0 +1,239 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetInputsOutputsMainFixture : public ParserFlatbuffersFixture
+{
+ explicit GetInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" }, { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ -1.2 ],
+ "max": [ 25.5 ],
+ "scale": [ 0.25 ],
+ "zero_point": [ 10 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": )"
+ + inputs
+ + R"(,
+ "outputs": )"
+ + outputs
+ + R"(,
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ },
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "ConvInputTensor",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "ConvOutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ }
+ ],
+ "description": "Test Subgraph Inputs Outputs",
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ })";
+
+ ReadStringToBinary();
+ }
+
+};
+
+struct GetEmptyInputsOutputsFixture : GetInputsOutputsMainFixture
+{
+ GetEmptyInputsOutputsFixture() : GetInputsOutputsMainFixture("[ ]", "[ ]") {}
+};
+
+struct GetInputsOutputsFixture : GetInputsOutputsMainFixture
+{
+ GetInputsOutputsFixture() : GetInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(0, tensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyOutputs, GetEmptyInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(0, tensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 1, 0);
+ BOOST_CHECK_EQUAL(2, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
+ CheckTensors(tensors[1], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 2,
+ "filterTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 1, 0);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
+}
+
+BOOST_AUTO_TEST_CASE(GetInputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetInputs(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputs(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputsInvalidSubgraph, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 2, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidSubgraph, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 2, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputsInvalidOperator, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidOperator, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
new file mode 100644
index 0000000000..7e6808d11e
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
@@ -0,0 +1,230 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+using TensorRawPtr = TfLiteParser::TensorRawPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetSubgraphInputsOutputsMainFixture : public ParserFlatbuffersFixture
+{
+ explicit GetSubgraphInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" }, { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ -1.2 ],
+ "max": [ 25.5 ],
+ "scale": [ 0.25 ],
+ "zero_point": [ 10 ]
+ }
+ }
+ ],
+ "inputs": )"
+ + inputs
+ + R"(,
+ "outputs": )"
+ + outputs
+ + R"(,
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ },
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "ConvInputTensor",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "ConvOutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ }
+ ],
+ "description": "Test Subgraph Inputs Outputs",
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ })";
+
+ ReadStringToBinary();
+ }
+
+};
+
+struct GetEmptySubgraphInputsOutputsFixture : GetSubgraphInputsOutputsMainFixture
+{
+ GetEmptySubgraphInputsOutputsFixture() : GetSubgraphInputsOutputsMainFixture("[ ]", "[ ]") {}
+};
+
+struct GetSubgraphInputsOutputsFixture : GetSubgraphInputsOutputsMainFixture
+{
+ GetSubgraphInputsOutputsFixture() : GetSubgraphInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphInputs, GetEmptySubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+ BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphOutputs, GetEmptySubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+ BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphInputs, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsSimpleQuantized, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsEmptyMinMax, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 1);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputs, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 1);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
+}
+
+BOOST_AUTO_TEST_CASE(GetSubgraphInputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_CASE(GetSubgraphOutputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(model, 2), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(model, 2), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetTensorIds.cpp b/src/armnnTfLiteParser/test/GetTensorIds.cpp
new file mode 100644
index 0000000000..2d123111d3
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetTensorIds.cpp
@@ -0,0 +1,162 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetTensorIdsFixture : public ParserFlatbuffersFixture
+{
+ explicit GetTensorIdsFixture(const std::string& inputs, const std::string& outputs)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": )"
+ + inputs
+ + R"(,
+ "outputs": )"
+ + outputs
+ + R"(,
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ }
+ ],
+ "description": "Test loading a model",
+ "buffers" : [ {}, {} ]
+ })";
+
+ ReadStringToBinary();
+ }
+};
+
+struct GetEmptyTensorIdsFixture : GetTensorIdsFixture
+{
+ GetEmptyTensorIdsFixture() : GetTensorIdsFixture("[ ]", "[ ]") {}
+};
+
+struct GetInputOutputTensorIdsFixture : GetTensorIdsFixture
+{
+ GetInputOutputTensorIdsFixture() : GetTensorIdsFixture("[ 0, 1, 2 ]", "[ 3 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyInputTensorIds, GetEmptyTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedIds = { };
+ std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
+ inputTensorIds.begin(), inputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyOutputTensorIds, GetEmptyTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedIds = { };
+ std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
+ outputTensorIds.begin(), outputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIds, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedInputIds = { 0, 1, 2 };
+ std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedInputIds.begin(), expectedInputIds.end(),
+ inputTensorIds.begin(), inputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIds, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedOutputIds = { 3 };
+ std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedOutputIds.begin(), expectedOutputIds.end(),
+ outputTensorIds.begin(), outputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidSubGraph, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 1, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidSubGraph, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
new file mode 100644
index 0000000000..fc88a4e58d
--- /dev/null
+++ b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
@@ -0,0 +1,138 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct EmptyNetworkFixture : public ParserFlatbuffersFixture
+{
+ explicit EmptyNetworkFixture() {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [],
+ "subgraphs": [ {} ]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(EmptyNetworkHasNoInputsAndOutputs, EmptyNetworkFixture)
+{
+ Setup();
+ BOOST_TEST(m_Parser->GetSubgraphCount() == 1);
+ BOOST_TEST(m_Parser->GetSubgraphInputTensorNames(0).size() == 0);
+ BOOST_TEST(m_Parser->GetSubgraphOutputTensorNames(0).size() == 0);
+}
+
+struct MissingTensorsFixture : public ParserFlatbuffersFixture
+{
+ explicit MissingTensorsFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [],
+ "subgraphs": [{
+ "inputs" : [ 0, 1 ],
+ "outputs" : [ 2, 3 ],
+ }]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MissingTensorsThrowException, MissingTensorsFixture)
+{
+ // this throws because it cannot do the input output tensor connections
+ BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+struct InvalidTensorsFixture : public ParserFlatbuffersFixture
+{
+ explicit InvalidTensorsFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ ],
+ "subgraphs": [{
+ "tensors": [ {}, {}, {}, {} ],
+ "inputs" : [ 0, 1 ],
+ "outputs" : [ 2, 3 ],
+ }]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(InvalidTensorsThrowException, InvalidTensorsFixture)
+{
+    // this throws because the tensors are invalid (missing shape/type information)
+ BOOST_CHECK_THROW(Setup(), armnn::InvalidArgumentException);
+}
+
+struct ValidTensorsFixture : public ParserFlatbuffersFixture
+{
+ explicit ValidTensorsFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" } ],
+ "subgraphs": [{
+ "tensors": [ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "FLOAT32",
+ "name": "In",
+ "buffer": 0,
+ }, {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "FLOAT32",
+ "name": "Out",
+ "buffer": 1,
+ }],
+ "inputs" : [ 0 ],
+ "outputs" : [ 1 ],
+ "operators": [{
+ "opcode_index": 0,
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "filter_width": 1,
+ "filter_height": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }]
+ }]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(GetValidInputOutputTensorNames, ValidTensorsFixture)
+{
+ Setup();
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0).size(), 1u);
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[0], "In");
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
+}
+
+BOOST_FIXTURE_TEST_CASE(ThrowIfSubgraphIdInvalidForInOutNames, ValidTensorsFixture)
+{
+ Setup();
+
+ // these throw because of the invalid subgraph id
+ BOOST_CHECK_THROW(m_Parser->GetSubgraphInputTensorNames(1), armnn::ParseException);
+ BOOST_CHECK_THROW(m_Parser->GetSubgraphOutputTensorNames(1), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/LoadModel.cpp b/src/armnnTfLiteParser/test/LoadModel.cpp
new file mode 100644
index 0000000000..a87eba83ac
--- /dev/null
+++ b/src/armnnTfLiteParser/test/LoadModel.cpp
@@ -0,0 +1,241 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <unistd.h>
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+using SubGraphPtr = TfLiteParser::SubGraphPtr;
+using OperatorPtr = TfLiteParser::OperatorPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct LoadModelFixture : public ParserFlatbuffersFixture
+{
+ explicit LoadModelFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" }, { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ },
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "ConvInputTensor",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "ConvOutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 1,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ }
+ ],
+ "description": "Test loading a model",
+ "buffers" : [ {}, {} ]
+ })";
+
+ ReadStringToBinary();
+ }
+
+ void CheckModel(const ModelPtr& model, uint32_t version, size_t opcodeSize,
+ const std::vector<tflite::BuiltinOperator>& opcodes,
+ size_t subgraphs, const std::string desc, size_t buffers)
+ {
+ BOOST_CHECK(model);
+ BOOST_CHECK_EQUAL(version, model->version);
+ BOOST_CHECK_EQUAL(opcodeSize, model->operator_codes.size());
+ CheckBuiltinOperators(opcodes, model->operator_codes);
+ BOOST_CHECK_EQUAL(subgraphs, model->subgraphs.size());
+ BOOST_CHECK_EQUAL(desc, model->description);
+ BOOST_CHECK_EQUAL(buffers, model->buffers.size());
+ }
+
+ void CheckBuiltinOperators(const std::vector<tflite::BuiltinOperator>& expectedOperators,
+ const std::vector<std::unique_ptr<tflite::OperatorCodeT>>& result)
+ {
+ BOOST_CHECK_EQUAL(expectedOperators.size(), result.size());
+ for (size_t i = 0; i < expectedOperators.size(); i++)
+ {
+ BOOST_CHECK_EQUAL(expectedOperators[i], result[i]->builtin_code);
+ }
+ }
+
+ void CheckSubgraph(const SubGraphPtr& subgraph, size_t tensors, const std::vector<int32_t>& inputs,
+ const std::vector<int32_t>& outputs, size_t operators, const std::string& name)
+ {
+ BOOST_CHECK(subgraph);
+ BOOST_CHECK_EQUAL(tensors, subgraph->tensors.size());
+ BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(), subgraph->inputs.begin(), subgraph->inputs.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
+ subgraph->outputs.begin(), subgraph->outputs.end());
+ BOOST_CHECK_EQUAL(operators, subgraph->operators.size());
+ BOOST_CHECK_EQUAL(name, subgraph->name);
+ }
+
+ void CheckOperator(const OperatorPtr& operatorPtr, uint32_t opcode, const std::vector<int32_t>& inputs,
+ const std::vector<int32_t>& outputs, tflite::BuiltinOptions optionType,
+ tflite::CustomOptionsFormat custom_options_format)
+ {
+ BOOST_CHECK(operatorPtr);
+ BOOST_CHECK_EQUAL(opcode, operatorPtr->opcode_index);
+ BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(),
+ operatorPtr->inputs.begin(), operatorPtr->inputs.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
+ operatorPtr->outputs.begin(), operatorPtr->outputs.end());
+ BOOST_CHECK_EQUAL(optionType, operatorPtr->builtin_options.type);
+ BOOST_CHECK_EQUAL(custom_options_format, operatorPtr->custom_options_format);
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(LoadModelFromBinary, LoadModelFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
+ 2, "Test loading a model", 2);
+ CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
+ CheckSubgraph(model->subgraphs[1], 3, { 0 }, { 1 }, 1, "");
+ CheckOperator(model->subgraphs[0]->operators[0], 0, { 1 }, { 0 }, tflite::BuiltinOptions_Pool2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+ CheckOperator(model->subgraphs[1]->operators[0], 1, { 0, 2 }, { 1 }, tflite::BuiltinOptions_Conv2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+}
+
+BOOST_FIXTURE_TEST_CASE(LoadModelFromFile, LoadModelFixture)
+{
+ std::string fname = boost::filesystem::temp_directory_path().string() + "/testtflite.tflite";
+ bool saved = flatbuffers::SaveFile(fname.c_str(),
+ reinterpret_cast<char *>(m_GraphBinary.data()),
+ m_GraphBinary.size(), true);
+ BOOST_CHECK_MESSAGE(saved, "Cannot save test file");
+
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile(fname.c_str());
+ CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
+ 2, "Test loading a model", 2);
+ CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
+ CheckSubgraph(model->subgraphs[1], 3, { 0 }, { 1 }, 1, "");
+ CheckOperator(model->subgraphs[0]->operators[0], 0, { 1 }, { 0 }, tflite::BuiltinOptions_Pool2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+ CheckOperator(model->subgraphs[1]->operators[0], 1, { 0, 2 }, { 1 }, tflite::BuiltinOptions_Conv2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+ remove(fname.c_str());
+}
+
+BOOST_AUTO_TEST_CASE(LoadNullBinary)
+{
+ BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(LoadInvalidBinary)
+{
+ std::string testData = "invalid data";
+    BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(testData.data()),
+                                                        testData.length()), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_CASE(LoadFileNotFound)
+{
+ BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
+}
+
+BOOST_AUTO_TEST_CASE(LoadNullPtrFile)
+{
+ BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
new file mode 100644
index 0000000000..590675b46c
--- /dev/null
+++ b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
@@ -0,0 +1,61 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "../TfLiteParser.hpp"
+#include <iostream>
+#include <string>
+
+struct TfLiteParserFixture
+{
+
+ armnnTfLiteParser::TfLiteParser m_Parser;
+ unsigned int m_InputShape[4];
+
+ TfLiteParserFixture() : m_Parser( ), m_InputShape { 1, 2, 2, 1 } {
+ m_Parser.Create();
+ }
+ ~TfLiteParserFixture() { }
+
+};
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser);
+
+
+BOOST_FIXTURE_TEST_CASE( EmptySqueezeDims_OutputWithAllDimensionsSqueezed, TfLiteParserFixture )
+{
+
+ std::vector<uint32_t> squeezeDims = { };
+
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
+ BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
+ BOOST_TEST(outputTensorInfo.GetNumDimensions() == 2);
+ BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 2, 2 })));
+};
+
+BOOST_FIXTURE_TEST_CASE( SqueezeDimsNotIncludingSizeOneDimensions_NoDimensionsSqueezedInOutput, TfLiteParserFixture )
+{
+ std::vector<uint32_t> squeezeDims = { 1, 2 };
+
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
+ BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
+ BOOST_TEST(outputTensorInfo.GetNumDimensions() == 4);
+ BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+};
+
+BOOST_FIXTURE_TEST_CASE( SqueezeDimsRangePartial_OutputWithDimensionsWithinRangeSqueezed, TfLiteParserFixture )
+{
+ std::vector<uint32_t> squeezeDims = { 1, 3 };
+
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
+ BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
+ BOOST_TEST(outputTensorInfo.GetNumDimensions() == 3);
+ BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2 })));
+};
+
+BOOST_AUTO_TEST_SUITE_END(); \ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
new file mode 100644
index 0000000000..3687a6ed00
--- /dev/null
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -0,0 +1,229 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <boost/filesystem.hpp>
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+#include <experimental/filesystem>
+#include <armnn/IRuntime.hpp>
+#include <armnn/TypesUtils.hpp>
+#include "test/TensorHelpers.hpp"
+
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+#include "flatbuffers/idl.h"
+#include "flatbuffers/util.h"
+
+#include <schema_generated.h>
+#include <iostream>
+
+using armnnTfLiteParser::ITfLiteParser;
+using TensorRawPtr = const tflite::TensorT *;
+
+struct ParserFlatbuffersFixture
+{
+ ParserFlatbuffersFixture()
+ : m_Parser(ITfLiteParser::Create()), m_NetworkIdentifier(-1)
+ {
+ armnn::IRuntime::CreationOptions options;
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuRef));
+
+#if ARMCOMPUTENEON_ENABLED
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuAcc));
+#endif
+
+#if ARMCOMPUTECL_ENABLED
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::GpuAcc));
+#endif
+ }
+
+ std::vector<uint8_t> m_GraphBinary;
+ std::string m_JsonString;
+ std::unique_ptr<ITfLiteParser, void (*)(ITfLiteParser *parser)> m_Parser;
+ std::vector<std::pair<armnn::IRuntimePtr, armnn::Compute>> m_Runtimes;
+ armnn::NetworkId m_NetworkIdentifier;
+
+ /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
+ /// so they don't need to be passed to the single-input-single-output overload of RunTest().
+ std::string m_SingleInputName;
+ std::string m_SingleOutputName;
+
+ void Setup()
+ {
+ bool ok = ReadStringToBinary();
+ if (!ok) {
+ throw armnn::Exception("LoadNetwork failed while reading binary input");
+ }
+
+ for (auto&& runtime : m_Runtimes)
+ {
+ armnn::INetworkPtr network =
+ m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+
+ if (!network) {
+ throw armnn::Exception("The parser failed to create an ArmNN network");
+ }
+
+ auto optimized = Optimize(*network,
+ { runtime.second, armnn::Compute::CpuRef },
+ runtime.first->GetDeviceSpec());
+ std::string errorMessage;
+
+ armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier,
+ move(optimized),
+ errorMessage);
+
+ if (ret != armnn::Status::Success)
+ {
+ throw armnn::Exception(
+ boost::str(
+ boost::format("The runtime failed to load the network. "
+ "Error was: %1%. in %2% [%3%:%4%]") %
+ errorMessage %
+ __func__ %
+ __FILE__ %
+ __LINE__));
+ }
+ }
+ }
+
+ void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName)
+ {
+ // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
+ m_SingleInputName = inputName;
+ m_SingleOutputName = outputName;
+ Setup();
+ }
+
+ bool ReadStringToBinary()
+ {
+ const char* schemafileName = getenv("ARMNN_TF_LITE_SCHEMA_PATH");
+ if (schemafileName == nullptr)
+ {
+ schemafileName = ARMNN_TF_LITE_SCHEMA_PATH;
+ }
+ std::string schemafile;
+
+ bool ok = flatbuffers::LoadFile(schemafileName, false, &schemafile);
+ BOOST_ASSERT_MSG(ok, "Couldn't load schema file " ARMNN_TF_LITE_SCHEMA_PATH);
+ if (!ok)
+ {
+ return false;
+ }
+
+ // parse schema first, so we can use it to parse the data after
+ flatbuffers::Parser parser;
+
+ ok &= parser.Parse(schemafile.c_str());
+ BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+
+ ok &= parser.Parse(m_JsonString.c_str());
+ BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+
+ if (!ok)
+ {
+ return false;
+ }
+
+ {
+ const uint8_t * bufferPtr = parser.builder_.GetBufferPointer();
+ size_t size = static_cast<size_t>(parser.builder_.GetSize());
+ m_GraphBinary.assign(bufferPtr, bufferPtr+size);
+ }
+ return ok;
+ }
+
+ /// Executes the network with the given input tensor and checks the result against the given output tensor.
+ /// This overload assumes the network has a single input and a single output.
+ template <std::size_t NumOutputDimensions, typename DataType>
+ void RunTest(size_t subgraphId,
+ const std::vector<DataType>& inputData,
+ const std::vector<DataType>& expectedOutputData);
+
+ /// Executes the network with the given input tensors and checks the results against the given output tensors.
+ /// This overload supports multiple inputs and multiple outputs, identified by name.
+ template <std::size_t NumOutputDimensions, typename DataType>
+ void RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType>>& inputData,
+ const std::map<std::string, std::vector<DataType>>& expectedOutputData);
+
+ void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
+ tflite::TensorType tensorType, uint32_t buffer, const std::string& name,
+ const std::vector<float>& min, const std::vector<float>& max,
+ const std::vector<float>& scale, const std::vector<int64_t>& zeroPoint)
+ {
+ BOOST_CHECK(tensors);
+ BOOST_CHECK_EQUAL(shapeSize, tensors->shape.size());
+ BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end());
+ BOOST_CHECK_EQUAL(tensorType, tensors->type);
+ BOOST_CHECK_EQUAL(buffer, tensors->buffer);
+ BOOST_CHECK_EQUAL(name, tensors->name);
+ BOOST_CHECK(tensors->quantization);
+ BOOST_CHECK_EQUAL_COLLECTIONS(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
+ tensors->quantization.get()->min.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
+ tensors->quantization.get()->max.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
+ tensors->quantization.get()->scale.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(zeroPoint.begin(), zeroPoint.end(),
+ tensors->quantization.get()->zero_point.begin(),
+ tensors->quantization.get()->zero_point.end());
+ }
+};
+
+template <std::size_t NumOutputDimensions, typename DataType>
+void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::vector<DataType>& inputData,
+ const std::vector<DataType>& expectedOutputData)
+{
+ RunTest<NumOutputDimensions, DataType>(subgraphId,
+ { { m_SingleInputName, inputData } },
+ { { m_SingleOutputName, expectedOutputData } });
+}
+
+template <std::size_t NumOutputDimensions, typename DataType>
+void
+ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType>>& inputData,
+ const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+{
+ for (auto&& runtime : m_Runtimes)
+ {
+ using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+
+ // Setup the armnn input tensors from the given vectors.
+ armnn::InputTensors inputTensors;
+ for (auto&& it : inputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
+ armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+ }
+
+ // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
+ std::map<std::string, boost::multi_array<DataType, NumOutputDimensions>> outputStorage;
+ armnn::OutputTensors outputTensors;
+ for (auto&& it : expectedOutputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
+ armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
+ outputTensors.push_back(
+ { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
+ }
+
+ runtime.first->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+
+ // Compare each output tensor to the expected values
+ for (auto&& it : expectedOutputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
+ auto outputExpected = MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second, it.second);
+ BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+ }
+ }
+}
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
new file mode 100644
index 0000000000..bb47738cf1
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Softmax.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SoftmaxFixture : public ParserFlatbuffersFixture
+{
+ explicit SoftmaxFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "SOFTMAX" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": [ 1, 7 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 7 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 0.00390625 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "SoftmaxOptions",
+ "builtin_options": {
+ "beta": 1.0
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [ {}, {} ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
+{
+ RunTest<2, uint8_t>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
new file mode 100644
index 0000000000..a8c99793ad
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Squeeze.cpp
@@ -0,0 +1,144 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
// Fixture that builds a minimal TFLite model (schema JSON) with a single SQUEEZE
// operator. The input/output tensor shapes and the optional squeeze_dims list are
// spliced into the JSON as text fragments so each derived fixture can vary them.
//
// inputShape / outputShape: JSON array text, e.g. "[ 1, 2, 2, 1 ]".
// squeezeDims: JSON array text for SqueezeOptions.squeeze_dims; when empty the
//              option is omitted from builtin_options entirely.
//
// Note: unlike SoftmaxFixture, this constructor does NOT call
// SetupSingleInputSingleOutput — test cases do that themselves so the
// error-path tests can assert that setup throws.
struct SqueezeFixture : public ParserFlatbuffersFixture
{
    explicit SqueezeFixture(const std::string& inputShape,
                            const std::string& outputShape,
                            const std::string& squeezeDims)
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "SQUEEZE" } ],
                "subgraphs": [ {
                    "tensors": [
                        {)";
        // Splice in the caller-supplied input shape.
        m_JsonString += R"(
                            "shape" : )" + inputShape + ",";
        m_JsonString += R"(
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {)";
        // Splice in the caller-supplied output shape.
        m_JsonString += R"(
                            "shape" : )" + outputShape;
        m_JsonString += R"(,
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "SqueezeOptions",
                            "builtin_options": {)";
        // An empty squeezeDims means "use the operator's default" — emit no option.
        if (!squeezeDims.empty())
        {
            m_JsonString += R"("squeeze_dims" : )" + squeezeDims;
        }
        m_JsonString += R"(},
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [ {}, {} ]
            }
        )";
    }
};
+
+struct SqueezeFixtureWithSqueezeDims : SqueezeFixture
+{
+ SqueezeFixtureWithSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2, 1 ]", "[ 0, 1, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
+{
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ RunTest<3, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ == armnn::TensorShape({2,2,1})));
+
+}
+
+struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture
+{
+ SqueezeFixtureWithoutSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2 ]", "") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
+{
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ RunTest<2, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ == armnn::TensorShape({2,2})));
+}
+
+struct SqueezeFixtureWithInvalidInput : SqueezeFixture
+{
+ SqueezeFixtureWithInvalidInput() : SqueezeFixture("[ 1, 2, 2, 1, 2 ]", "[ 1, 2, 2, 1 ]", "[ ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidInput, SqueezeFixtureWithInvalidInput)
+{
+ BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")),
+ armnn::InvalidArgumentException);
+}
+
+struct SqueezeFixtureWithSqueezeDimsSizeInvalid : SqueezeFixture
+{
+ SqueezeFixtureWithSqueezeDimsSizeInvalid() : SqueezeFixture("[ 1, 2, 2, 1 ]",
+ "[ 1, 2, 2, 1 ]",
+ "[ 1, 2, 2, 2, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidSqueezeDims, SqueezeFixtureWithSqueezeDimsSizeInvalid)
+{
+ BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+}
+
+
+struct SqueezeFixtureWithNegativeSqueezeDims : SqueezeFixture
+{
+ SqueezeFixtureWithNegativeSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]",
+ "[ 1, 2, 2, 1 ]",
+ "[ -2 , 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeNegativeSqueezeDims, SqueezeFixtureWithNegativeSqueezeDims)
+{
+ BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()