diff options
author | Inki Dae <inki.dae@samsung.com> | 2020-09-10 15:33:54 +0900 |
---|---|---|
committer | mike.kelly <mike.kelly@arm.com> | 2020-09-24 16:03:00 +0000 |
commit | d4619e28a4cde423d5b4086a98c31f97b52a68d7 (patch) | |
tree | 9e6da174b200e8135ed2bbc2b0b8cb761d3e1a4f /src/armnnTfLiteParser/TfLiteParser.cpp | |
parent | 02036e99c1b2074e5e5f04a2fe443f0c90689683 (diff) | |
download | armnn-d4619e28a4cde423d5b4086a98c31f97b52a68d7.tar.gz |
Add int32 and int64 ArgMax op support
This patch adds int32 and int64 ArgMax op support.
ArmNN already has an ArgMax op, but it is unused and does not support the
int64 output type.
So this patch adds a new type, Signed64, and also adds
ArgMinMax computation function for int64 type support.
By default, the output tensor type of the ArgMax op is int64 for a
TensorFlow Lite model, so this patch makes the appropriate function — the
ArgMax op for int64 or int32 — be called according to the parsed output_type value.
With this patch, ARMNN supports both types - int64 and int32 - for
ArgMinMax op.
Changelog v1:
- Check if output data type of ArgMinMax op is valid or not.
- Use template function to support int32 and int64 types of ArgMinMax function.
- Keep using Signed32 as default data type of m_Output_Type.
Change-Id: I7a8e7e38dd9e5acc81464571d8b4d51378fc7f14
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Diffstat (limited to 'src/armnnTfLiteParser/TfLiteParser.cpp')
-rw-r--r-- | src/armnnTfLiteParser/TfLiteParser.cpp | 46 |
1 files changed, 45 insertions, 1 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index 6143f4af6a..0aad048970 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -345,7 +345,9 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, case tflite::TensorType_INT32: type = armnn::DataType::Signed32; break; - + case tflite::TensorType_INT64: + type = armnn::DataType::Signed64; + break; default: { CheckLocation location = CHECK_LOCATION(); @@ -598,6 +600,7 @@ TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& o m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv; m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack; m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParser::ParseDiv; + m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParser::ParseArgMax; // register supported custom operators m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess; } @@ -2847,6 +2850,47 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex) RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes); } +void TfLiteParser::ParseArgMax(size_t subgraphIndex, size_t operatorIndex) +{ + const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex]; + const auto *options = operatorPtr->builtin_options.AsArgMaxOptions(); + + CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); + auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(inputs.size(), 2); + + auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto layerName = boost::str(boost::format("ArgMax:%1%:%2%") % subgraphIndex % operatorIndex); + + armnn::TensorInfo sizeTensorInfo0 = ToTensorInfo(inputs[0]); + armnn::TensorInfo sizeTensorInfo1 = 
ToTensorInfo(inputs[1]); + + // Get const axis value from model and set it to descriptor. + BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer); + + ArgMinMaxDescriptor desc; + desc.m_Axis = axisBufferPtr->data.data()[0]; + // If output_type is int32 then set Signed32 else Signed64. Default type is Signed64. + desc.m_Output_Type = options->output_type == 3 ? armnn::DataType::Signed32 : armnn::DataType::Signed64; + desc.m_Function = ArgMinMaxFunction::Max; + + // Register a ArgMax layer. + IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerName.c_str()); + + armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + // Register input tensor to the layer. + auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]}); + + // Register output tensor to the layer. + auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes); +} + armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer, unsigned int outputSlot, tflite::ActivationFunctionType activationType) |