diff options
-rw-r--r--	docs/05_01_parsers.dox	3
-rw-r--r--	src/armnnTfLiteParser/TfLiteParser.cpp	30
-rw-r--r--	src/armnnTfLiteParser/TfLiteParser.hpp	3
-rw-r--r--	src/armnnTfLiteParser/test/LogSoftmax.cpp	70
4 files changed, 103 insertions(+), 3 deletions(-)
diff --git a/docs/05_01_parsers.dox b/docs/05_01_parsers.dox index 8e406e85cf..99178ac736 100644 --- a/docs/05_01_parsers.dox +++ b/docs/05_01_parsers.dox @@ -1,4 +1,4 @@ -/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. +/// Copyright (c) 2022 Arm Ltd and Contributors. All rights reserved. /// /// SPDX-License-Identifier: MIT /// @@ -146,6 +146,7 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators - LESS_EQUAL - LOGICAL_NOT - LOGISTIC +- LOG_SOFTMAX - L2_NORMALIZATION - MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE - MAXIMUM diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index 479fc4f474..5dbb5ee503 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -710,6 +710,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt = &TfLiteParserImpl::ParseLocalResponseNormalization; m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot; m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic; + m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax; m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization; m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D; m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum; @@ -1874,6 +1875,33 @@ void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex) RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]}); } +void TfLiteParserImpl::ParseLogSoftmax(size_t subgraphIndex, size_t 
operatorIndex) +{ + CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); + + LogSoftmaxDescriptor desc; + + auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(inputs.size(), 1); + auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto layerName = fmt::format("LogSoftmax:{}:{}", subgraphIndex, operatorIndex); + IConnectableLayer* const layer = m_Network->AddLogSoftmaxLayer(desc, layerName.c_str()); + + armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + // register the input connection slots for the layer, connections are made after all layers have been created + // only the tensors for the inputs are relevant, exclude the const tensors + auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]}); + + // register the output connection slots for the layer, connections are made after all layers have been created + auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]}); +} + void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex) { CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp index 3fc72057c7..43c5466e69 100644 --- a/src/armnnTfLiteParser/TfLiteParser.hpp +++ b/src/armnnTfLiteParser/TfLiteParser.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #pragma once @@ -146,6 +146,7 @@ private: void ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex); void ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex); void ParseLogistic(size_t subgraphIndex, size_t operatorIndex); + void ParseLogSoftmax(size_t subgraphIndex, size_t operatorIndex); void ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex); void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex); void ParseMaximum(size_t subgraphIndex, size_t operatorIndex); diff --git a/src/armnnTfLiteParser/test/LogSoftmax.cpp b/src/armnnTfLiteParser/test/LogSoftmax.cpp new file mode 100644 index 0000000000..7414bc30a0 --- /dev/null +++ b/src/armnnTfLiteParser/test/LogSoftmax.cpp @@ -0,0 +1,70 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ParserFlatbuffersFixture.hpp" + + +TEST_SUITE("TensorflowLiteParser_LogSoftmax") +{ +struct LogSoftmaxFixture : public ParserFlatbuffersFixture +{ + explicit LogSoftmaxFixture() + { + m_JsonString = R"( + { + "version": 3, + "operator_codes": [ { "builtin_code": "LOG_SOFTMAX" } ], + "subgraphs": [ { + "tensors": [ + { + "shape": [ 1, 7 ], + "type": "UINT8", + "buffer": 0, + "name": "inputTensor", + "quantization": { + "min": [ 0.0 ], + "max": [ 255.0 ], + "scale": [ 1.0 ], + "zero_point": [ 0 ], + } + }, + { + "shape": [ 1, 7 ], + "type": "UINT8", + "buffer": 1, + "name": "outputTensor", + "quantization": { + "min": [ 0.0 ], + "max": [ 255.0 ], + "scale": [ 0.00390625 ], + "zero_point": [ 0 ], + } + } + ], + "inputs": [ 0 ], + "outputs": [ 1 ], + "operators": [ + { + "opcode_index": 0, + "inputs": [ 0 ], + "outputs": [ 1 ], + "builtin_options_type": "LogSoftmaxOptions", + "custom_options_format": "FLEXBUFFERS" + } + ], + } ], + "buffers" : [ {}, {} ] + } + )"; + SetupSingleInputSingleOutput("inputTensor", "outputTensor"); + } +}; + +TEST_CASE_FIXTURE(LogSoftmaxFixture, 
"ParseLogSoftmaxLite") +{ + RunTest<2, armnn::DataType::QAsymmU8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 }); +} + +}