author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-10-23 17:38:17 +0100
committer  Áron Virginás-Tar <aron.virginas-tar@arm.com>    2019-10-24 14:47:31 +0000
commit     c975f9295e076febd4ecd45c9174d54f7327b3cc (patch)
tree       a712ed96879644a720a94cde01d927300ee5af17 /src/armnnTfLiteParser/test
parent     7bbdf9db051f40377a284a28375816e60349376d (diff)
IVGCVSW-4007 Add StandInLayer for unsupported operations in TfLiteParser
* Fixed a bug in custom operator support that caused all custom operators to be parsed as a DetectionPostProcessLayer
* Added support for handling unsupported operators (built-in or custom) by replacing them with a StandInLayer in the generated network
* Added options to TfLiteParser to control whether unsupported operators are replaced with StandInLayers or, as before, cause a ParserException to be thrown

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I125a63016c7c510b1fdde6033842db4f276718c4
Diffstat (limited to 'src/armnnTfLiteParser/test')
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp   19
-rw-r--r--  src/armnnTfLiteParser/test/Unsupported.cpp                249
2 files changed, 261 insertions, 7 deletions
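For context, this is roughly how a client opts into the new behaviour. A minimal sketch based on the fixture change below; the MakeTolerantParser helper name is illustrative, while TfLiteParserOptions, m_StandInLayerForUnsupported, CreateRaw and Destroy are the names used in the diff.

#include <armnn/Optional.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

using armnnTfLiteParser::ITfLiteParser;
using armnnTfLiteParser::ITfLiteParserPtr;

// Illustrative helper: builds a parser that replaces unsupported operators
// with StandInLayers instead of throwing a ParserException.
ITfLiteParserPtr MakeTolerantParser()
{
    ITfLiteParser::TfLiteParserOptions options;
    options.m_StandInLayerForUnsupported = true;

    return ITfLiteParserPtr(
        ITfLiteParser::CreateRaw(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options)),
        &ITfLiteParser::Destroy);
}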
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index b20bea247d..0c6428012b 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -29,22 +29,27 @@
#include <iostream>
using armnnTfLiteParser::ITfLiteParser;
-using TensorRawPtr = const tflite::TensorT *;
+using armnnTfLiteParser::ITfLiteParserPtr;
+using TensorRawPtr = const tflite::TensorT *;
struct ParserFlatbuffersFixture
{
ParserFlatbuffersFixture() :
- m_Parser(ITfLiteParser::Create()),
+ m_Parser(nullptr, &ITfLiteParser::Destroy),
m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
m_NetworkIdentifier(-1)
{
+ ITfLiteParser::TfLiteParserOptions options;
+ options.m_StandInLayerForUnsupported = true;
+
+ m_Parser.reset(ITfLiteParser::CreateRaw(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options)));
}
std::vector<uint8_t> m_GraphBinary;
- std::string m_JsonString;
- std::unique_ptr<ITfLiteParser, void (*)(ITfLiteParser *parser)> m_Parser;
- armnn::IRuntimePtr m_Runtime;
- armnn::NetworkId m_NetworkIdentifier;
+ std::string m_JsonString;
+ ITfLiteParserPtr m_Parser;
+ armnn::IRuntimePtr m_Runtime;
+ armnn::NetworkId m_NetworkIdentifier;
/// If the single-input-single-output overload of Setup() is called, these will store the input and output name
/// so they don't need to be passed to the single-input-single-output overload of RunTest().
@@ -346,4 +351,4 @@ void ParserFlatbuffersFixture::RunTest(std::size_t subgraphId,
}
}
}
-}
\ No newline at end of file
+}
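A side note on the fixture change above: ITfLiteParserPtr carries a function-pointer deleter, so the member cannot be default-constructed. The fixture therefore seeds it with nullptr and ITfLiteParser::Destroy in the initialiser list and only calls reset() once the options are filled in. A stripped-down sketch of the same idiom, with purely illustrative names (Widget, CreateRawWidget, DestroyWidget):

#include <memory>

struct Widget {};                                         // stands in for ITfLiteParser
Widget* CreateRawWidget()        { return new Widget(); } // stands in for ITfLiteParser::CreateRaw
void    DestroyWidget(Widget* w) { delete w; }            // stands in for ITfLiteParser::Destroy

struct Fixture
{
    // A unique_ptr with a function-pointer deleter has no default constructor,
    // so it is initialised with a null object plus the deleter...
    Fixture() : m_Widget(nullptr, &DestroyWidget)
    {
        // ...and only receives the real object once its construction arguments are ready.
        m_Widget.reset(CreateRawWidget());
    }

    std::unique_ptr<Widget, void (*)(Widget*)> m_Widget;
};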
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
new file mode 100644
index 0000000000..25abde8142
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -0,0 +1,249 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <armnn/LayerVisitorBase.hpp>
+
+#include <layers/StandInLayer.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/polymorphic_cast.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+using namespace armnn;
+
+class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
+{
+public:
+ StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
+ const std::vector<TensorInfo>& outputInfos)
+ : LayerVisitorBase<VisitorThrowingPolicy>()
+ , m_InputInfos(inputInfos)
+ , m_OutputInfos(outputInfos) {}
+
+ void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
+
+ void VisitOutputLayer(const IConnectableLayer*, LayerBindingId id, const char*) override {}
+
+ void VisitStandInLayer(const IConnectableLayer* layer,
+ const StandInDescriptor& descriptor,
+ const char*) override
+ {
+ unsigned int numInputs = boost::numeric_cast<unsigned int>(m_InputInfos.size());
+ BOOST_CHECK(descriptor.m_NumInputs == numInputs);
+ BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
+
+ unsigned int numOutputs = boost::numeric_cast<unsigned int>(m_OutputInfos.size());
+ BOOST_CHECK(descriptor.m_NumOutputs == numOutputs);
+ BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
+
+ const StandInLayer* standInLayer = boost::polymorphic_downcast<const StandInLayer*>(layer);
+ for (unsigned int i = 0u; i < numInputs; ++i)
+ {
+ const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
+ BOOST_CHECK(connectedSlot != nullptr);
+
+ const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
+ BOOST_CHECK(inputInfo == m_InputInfos[i]);
+ }
+
+ for (unsigned int i = 0u; i < numOutputs; ++i)
+ {
+ const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
+ BOOST_CHECK(outputInfo == m_OutputInfos[i]);
+ }
+ }
+
+private:
+ std::vector<TensorInfo> m_InputInfos;
+ std::vector<TensorInfo> m_OutputInfos;
+};
+
+class DummyCustomFixture : public ParserFlatbuffersFixture
+{
+public:
+ explicit DummyCustomFixture(const std::vector<TensorInfo>& inputInfos,
+ const std::vector<TensorInfo>& outputInfos)
+ : ParserFlatbuffersFixture()
+ , m_StandInLayerVerifier(inputInfos, outputInfos)
+ {
+ const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
+ BOOST_ASSERT(numInputs > 0);
+
+ const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
+ BOOST_ASSERT(numOutputs > 0);
+
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [{
+ "builtin_code": "CUSTOM",
+ "custom_code": "DummyCustomOperator"
+ }],
+ "subgraphs": [ {
+ "tensors": [)";
+
+ // Add input tensors
+ for (unsigned int i = 0u; i < numInputs; ++i)
+ {
+ const TensorInfo& inputInfo = inputInfos[i];
+ m_JsonString += R"(
+ {
+ "shape": )" + GetTensorShapeAsString(inputInfo.GetShape()) + R"(,
+ "type": )" + GetDataTypeAsString(inputInfo.GetDataType()) + R"(,
+ "buffer": 0,
+ "name": "inputTensor)" + std::to_string(i) + R"(",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + std::to_string(inputInfo.GetQuantizationScale()) + R"( ],
+ "zero_point": [ )" + std::to_string(inputInfo.GetQuantizationOffset()) + R"( ],
+ }
+ },)";
+ }
+
+ // Add output tensors
+ for (unsigned int i = 0u; i < numOutputs; ++i)
+ {
+ const TensorInfo& outputInfo = outputInfos[i];
+ m_JsonString += R"(
+ {
+ "shape": )" + GetTensorShapeAsString(outputInfo.GetShape()) + R"(,
+ "type": )" + GetDataTypeAsString(outputInfo.GetDataType()) + R"(,
+ "buffer": 0,
+ "name": "outputTensor)" + std::to_string(i) + R"(",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + std::to_string(outputInfo.GetQuantizationScale()) + R"( ],
+ "zero_point": [ )" + std::to_string(outputInfo.GetQuantizationOffset()) + R"( ],
+ }
+ })";
+
+ if (i + 1 < numOutputs)
+ {
+ m_JsonString += ",";
+ }
+ }
+
+ const std::string inputIndices = GetIndicesAsString(0u, numInputs - 1u);
+ const std::string outputIndices = GetIndicesAsString(numInputs, numInputs + numOutputs - 1u);
+
+ // Add dummy custom operator
+ m_JsonString += R"(],
+ "inputs": )" + inputIndices + R"(,
+ "outputs": )" + outputIndices + R"(,
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": )" + inputIndices + R"(,
+ "outputs": )" + outputIndices + R"(,
+ "builtin_options_type": 0,
+ "custom_options": [ ],
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { }
+ ]
+ }
+ )";
+
+ ReadStringToBinary();
+ }
+
+ void RunTest()
+ {
+ INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+ network->Accept(m_StandInLayerVerifier);
+ }
+
+private:
+ static std::string GetTensorShapeAsString(const TensorShape& tensorShape)
+ {
+ std::stringstream stream;
+ stream << "[ ";
+ for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
+ {
+ stream << tensorShape[i];
+ if (i + 1 < tensorShape.GetNumDimensions())
+ {
+ stream << ",";
+ }
+ stream << " ";
+ }
+ stream << "]";
+
+ return stream.str();
+ }
+
+ static std::string GetDataTypeAsString(DataType dataType)
+ {
+ switch (dataType)
+ {
+ case DataType::Float32: return "FLOAT32";
+ case DataType::QuantisedAsymm8: return "UINT8";
+ default: return "UNKNOWN";
+ }
+ }
+
+ static std::string GetIndicesAsString(unsigned int first, unsigned int last)
+ {
+ std::stringstream stream;
+ stream << "[ ";
+ for (unsigned int i = first; i <= last ; ++i)
+ {
+ stream << i;
+ if (i + 1 <= last)
+ {
+ stream << ",";
+ }
+ stream << " ";
+ }
+ stream << "]";
+
+ return stream.str();
+ }
+
+ StandInLayerVerifier m_StandInLayerVerifier;
+};
+
+class DummyCustom1Input1OutputFixture : public DummyCustomFixture
+{
+public:
+ DummyCustom1Input1OutputFixture()
+ : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32) },
+ { TensorInfo({ 2, 2 }, DataType::Float32) }) {}
+};
+
+class DummyCustom2Inputs1OutputFixture : public DummyCustomFixture
+{
+public:
+ DummyCustom2Inputs1OutputFixture()
+ : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32), TensorInfo({ 2, 2 }, DataType::Float32) },
+ { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
+{
+ RunTest();
+}
+
+BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator2Inputs1Output, DummyCustom2Inputs1OutputFixture)
+{
+ RunTest();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
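Beyond the tests, an application can detect stand-ins in much the same way as the StandInLayerVerifier above: parse the model with m_StandInLayerForUnsupported enabled, then walk the resulting network with a visitor that only reacts to StandInLayer. A minimal sketch, assuming armnn's non-throwing visitor policy (VisitorNoThrowPolicy) is available alongside the throwing one used in the test; the StandInCounter name is illustrative.

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/LayerVisitorBase.hpp>

// Counts the StandInLayers the parser inserted for unsupported operators,
// so the caller can decide whether to warn, fall back or reject the model.
class StandInCounter : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitStandInLayer(const armnn::IConnectableLayer* /*layer*/,
                           const armnn::StandInDescriptor& /*descriptor*/,
                           const char* /*name*/) override
    {
        // One visit per operator the parser could not map (built-in or custom).
        ++m_NumStandIns;
    }

    unsigned int GetNumStandIns() const { return m_NumStandIns; }

private:
    unsigned int m_NumStandIns = 0;
};

// Usage, assuming 'parser' was created with the option enabled and
// 'graphBinary' holds the flatbuffer model:
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinary(graphBinary);
//     StandInCounter counter;
//     network->Accept(counter);
//     if (counter.GetNumStandIns() > 0) { /* handle unsupported operators */ }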