diff options
author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-10-23 17:38:17 +0100 |
---|---|---|
committer | Áron Virginás-Tar <aron.virginas-tar@arm.com> | 2019-10-24 14:47:31 +0000 |
commit | c975f9295e076febd4ecd45c9174d54f7327b3cc (patch) | |
tree | a712ed96879644a720a94cde01d927300ee5af17 /src/armnnTfLiteParser/TfLiteParser.hpp | |
parent | 7bbdf9db051f40377a284a28375816e60349376d (diff) | |
download | armnn-c975f9295e076febd4ecd45c9174d54f7327b3cc.tar.gz |
IVGCVSW-4007 Add StandInLayer for unsupported operations in TfLiteParser
* Fixed bug in custom operator support that caused all custom operators
to be parsed as a DetectionPostProcessLayer
* Added support for handling unsupported operators (built-in or custom)
by replacing them with a StandInLayer in the generated network
* Added options to TfLiteParser to control whether we want to use
StandInLayers when we encounter unsupported operators, or whether we
prefer to throw a ParserException as before
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I125a63016c7c510b1fdde6033842db4f276718c4
Diffstat (limited to 'src/armnnTfLiteParser/TfLiteParser.hpp')
-rw-r--r-- | src/armnnTfLiteParser/TfLiteParser.hpp | 12 |
1 file changed, 10 insertions, 2 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp index fac2599322..fb01fe8ba2 100644 --- a/src/armnnTfLiteParser/TfLiteParser.hpp +++ b/src/armnnTfLiteParser/TfLiteParser.hpp @@ -10,6 +10,7 @@ #include <schema_generated.h> #include <functional> +#include <unordered_map> #include <vector> namespace armnnTfLiteParser @@ -58,7 +59,7 @@ public: /// Return the output tensor names for a given subgraph virtual std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const override; - TfLiteParser(); + TfLiteParser(const armnn::Optional<ITfLiteParser::TfLiteParserOptions>& options = armnn::EmptyOptional()); virtual ~TfLiteParser() {} public: @@ -89,7 +90,9 @@ private: // signature for the parser functions using OperatorParsingFunction = void(TfLiteParser::*)(size_t subgraphIndex, size_t operatorIndex); + void ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex); void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex); + void ParseActivation(size_t subgraphIndex, size_t operatorIndex, armnn::ActivationFunction activationType); void ParseAdd(size_t subgraphIndex, size_t operatorIndex); void ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex); @@ -180,11 +183,16 @@ private: armnn::TensorInfo& tensorInfo, armnn::Optional<armnn::PermutationVector&> permutationVector); + // Settings for configuring the TfLiteParser + armnn::Optional<ITfLiteParser::TfLiteParserOptions> m_Options; + /// The network we're building. 
Gets cleared after it is passed to the user armnn::INetworkPtr m_Network; - std::vector<OperatorParsingFunction> m_ParserFunctions; ModelPtr m_Model; + std::vector<OperatorParsingFunction> m_ParserFunctions; + std::unordered_map<std::string, OperatorParsingFunction> m_CustomParserFunctions; + /// A mapping of an output slot to each of the input slots it should be connected to /// The outputSlot is from the layer that creates this tensor as one of its ouputs /// The inputSlots are from the layers that use this tensor as one of their inputs |