diff options
Diffstat (limited to 'src/armnnTfLiteParser/TfLiteParser.cpp')
-rw-r--r-- | src/armnnTfLiteParser/TfLiteParser.cpp | 54 |
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index 1a1e854395..db60224999 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -10,6 +10,7 @@ #include <armnn/Exceptions.hpp> #include <armnn/Logging.hpp> #include <armnn/Tensor.hpp> +#include <armnnUtils/TensorUtils.hpp> #include <armnn/TypesUtils.hpp> #include <armnn/utility/Assert.hpp> #include <armnn/utility/IgnoreUnused.hpp> @@ -580,6 +581,7 @@ TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& o m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze; m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice; m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub; + m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParser::ParseSum; m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH; m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose; m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv; @@ -2994,6 +2996,58 @@ void TfLiteParser::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex) RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]}); } +void TfLiteParser::ParseSum(size_t subgraphIndex, size_t operatorIndex) +{ + CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); + + const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex]; + const auto *options = operatorPtr->builtin_options.AsReducerOptions(); + + auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(inputs.size(), 2); + + auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto layerName = fmt::format("Sum:{}:{}", subgraphIndex, operatorIndex); + + armnn::TensorInfo inputTensorInfo0 = ToTensorInfo(inputs[0]); 
+ armnn::TensorInfo inputTensorInfo1 = ToTensorInfo(inputs[1]); + TensorShape input0Shape = inputTensorInfo0.GetShape(); + + ReduceDescriptor desc; + + BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer); + // Get const axis value from model and set it to descriptor. + if (axisBufferPtr != nullptr) + { + for (uint32_t i = 0; i < inputTensorInfo1.GetNumElements(); ++i) + { + desc.m_vAxis.push_back(armnnUtils::GetUnsignedAxis(inputTensorInfo0.GetNumDimensions(), + axisBufferPtr->data.data()[i])); + } + } + + desc.m_TargetHeight = input0Shape[1]; + desc.m_TargetWidth = input0Shape[2]; + desc.m_KeepDims = options->keep_dims; + desc.m_ReduceOperation = armnn::ReduceOperation::Sum; + + // Register a new layer object, Sum. + IConnectableLayer *layer = m_Network->AddReduceLayer(desc, layerName.c_str()); + + armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + // Register input tensor to the layer. + auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]}); + + // Register output tensor to the layer. + auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes); +} + armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer, unsigned int outputSlot, tflite::ActivationFunctionType activationType) |