diff options
author | Samuel Yap <samuel.yap@arm.com> | 2022-08-24 17:04:34 +0100 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-09-05 10:47:21 +0100 |
commit | fd3ba5a2f3630dc34094912b1a2c057f790f3092 (patch) | |
tree | 546406ab4199637a4374d859d0a9ad328a63c97d /src/armnnTfLiteParser/TfLiteParser.cpp | |
parent | a04f4a15575ddd778d3a330dbce629412e1ffc0c (diff) | |
download | armnn-fd3ba5a2f3630dc34094912b1a2c057f790f3092.tar.gz |
IVGCVSW-6497: BatchMatMul TfLite Parser
* Added armnnTfLiteParser for BatchMatMul
* Added unit testing for parser
* Updated CMakeLists
Signed-off-by: Samuel Yap <samuel.yap@arm.com>
Change-Id: If6842aaf7cf08f688093b714e2ecea6e8cd87161
Diffstat (limited to 'src/armnnTfLiteParser/TfLiteParser.cpp')
-rw-r--r-- | src/armnnTfLiteParser/TfLiteParser.cpp | 39 |
1 file changed, 39 insertions, 0 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index 880de100c1..030420345e 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -680,6 +680,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax; m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D; m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND; + m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul; m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast; m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation; m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D; @@ -1565,6 +1566,44 @@ void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorI ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average); } +void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorIndex) +{ + CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); + + auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(inputs.size(), 2); + + auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto layerName = fmt::format("BatchMatMul:{}:{}", subgraphIndex, operatorIndex); + + TensorInfo inputXTensorInfo = ToTensorInfo(inputs[0]); + TensorInfo inputYTensorInfo = ToTensorInfo(inputs[1]); + + TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true); + + const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex]; + const auto* options = operatorPtr->builtin_options.AsBatchMatMulOptions(); + + BatchMatMulDescriptor descriptor(false, + false, + 
options->adj_x, + options->adj_y); + // Arbitrary DataLayout + + IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str()); + ARMNN_ASSERT(layer != nullptr); + + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]}); + + auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]}); +} + void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex) { CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); |