diff options
author | James Conroy <james.conroy@arm.com> | 2020-07-10 13:01:01 +0100 |
---|---|---|
committer | James Conroy <james.conroy@arm.com> | 2020-07-10 14:34:18 +0100 |
commit | 3d5672ffa0c2fbb1811184cd2d1ebaa501315677 (patch) | |
tree | 5e4cb1cda6d9752279acecf95f2ba4e379979219 /src/armnnTfLiteParser/TfLiteParser.cpp | |
parent | 1fdeb99ca83bac83e0cacb332880e2e62dd22198 (diff) | |
download | armnn-3d5672ffa0c2fbb1811184cd2d1ebaa501315677.tar.gz |
Github #388 Remove TfLite Concat quant validation
* Matching quant validation was added to TfLite
parser as per TfLite documentation.
* Removing this validation for Concat and Pad as
it is causing some nightly model test failures.
Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: Ie128fb66cc2d4a193ac22dc9eb41f5703d113663
Diffstat (limited to 'src/armnnTfLiteParser/TfLiteParser.cpp')
-rw-r--r-- | src/armnnTfLiteParser/TfLiteParser.cpp | 6 |
1 file changed, 0 insertions, 6 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 537ea3c0c4..b1ec0e54c2 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1942,10 +1942,7 @@ void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
     }

     auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
-
-    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
     TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
-    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

     IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
@@ -2313,10 +2310,7 @@ void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
     }

     auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
-
-    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
     TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
-    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

     IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);