diff options
Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp           |  62 +
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp           |   2 +
-rw-r--r--  src/armnnTfLiteParser/test/Power.cpp             | 101 +
-rw-r--r--  src/armnnTfLiteParser/test/SquaredDifference.cpp | 101 +
4 files changed, 266 insertions, 0 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index 244f1fa197..5075da41c2 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -797,6 +797,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack; m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad; m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad; + m_ParserFunctions[tflite::BuiltinOperator_POW] = &TfLiteParserImpl::ParsePower; m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu; m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize; m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu; @@ -818,6 +819,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit; m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV; m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze; + m_ParserFunctions[tflite::BuiltinOperator_SQUARED_DIFFERENCE] = &TfLiteParserImpl::ParseSquaredDifference; m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice; m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub; m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum; @@ -4584,6 +4586,36 @@ void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex) ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg); } +void TfLiteParserImpl::ParsePower(size_t subgraphIndex, size_t operatorIndex) +{ + CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); + + auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex); + 
CHECK_VALID_SIZE(inputs.size(), 2); + + auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto layerName = fmt::format("Power:{}:{}", subgraphIndex, operatorIndex); + + TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0); + TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1); + CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1"); + + IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Power, layerName.c_str()); + ARMNN_ASSERT(layer != nullptr); + + TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); + CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]}); + + auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]}); +} + void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex) { ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt); @@ -4599,6 +4631,36 @@ void TfLiteParserImpl::ParseSqrt(size_t subgraphIndex, size_t operatorIndex) ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sqrt); } +void TfLiteParserImpl::ParseSquaredDifference(size_t subgraphIndex, size_t operatorIndex) +{ + CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); + + auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex); + CHECK_VALID_SIZE(inputs.size(), 2); + + auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex); + 
CHECK_VALID_SIZE(outputs.size(), 1); + + auto layerName = fmt::format("SquaredDifference:{}:{}", subgraphIndex, operatorIndex); + + TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0); + TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1); + CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1"); + + IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::SqDiff, layerName.c_str()); + ARMNN_ASSERT(layer != nullptr); + + TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); + CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]}); + + auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex)); + RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]}); +} + void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation) { CHECK_MODEL(m_Model, subgraphIndex, operatorIndex); diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp index 91fad43b8a..774d054572 100644 --- a/src/armnnTfLiteParser/TfLiteParser.hpp +++ b/src/armnnTfLiteParser/TfLiteParser.hpp @@ -163,6 +163,7 @@ private: void ParsePack(size_t subgraphIndex, size_t operatorIndex); void ParsePad(size_t subgraphIndex, size_t operatorIndex); void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm); + void ParsePower(size_t subgraphIndex, size_t operatorIndex); void ParsePrelu(size_t subgraphIndex, size_t operatorIndex); void 
ParseQuantize(size_t subgraphIndex, size_t operatorIndex); void ParseReduce(size_t subgraphIndex, size_t operatorIndex, armnn::ReduceOperation reduceOperation); @@ -186,6 +187,7 @@ private: void ParseSplit(size_t subgraphIndex, size_t operatorIndex); void ParseSplitV(size_t subgraphIndex, size_t operatorIndex); void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex); + void ParseSquaredDifference(size_t subgraphIndex, size_t operatorIndex); void ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex); void ParseSub(size_t subgraphIndex, size_t operatorIndex); void ParseSum(size_t subgraphIndex, size_t operatorIndex); diff --git a/src/armnnTfLiteParser/test/Power.cpp b/src/armnnTfLiteParser/test/Power.cpp new file mode 100644 index 0000000000..f8b354a2d2 --- /dev/null +++ b/src/armnnTfLiteParser/test/Power.cpp @@ -0,0 +1,101 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ParserFlatbuffersFixture.hpp" + +#include <doctest/doctest.h> + + +TEST_SUITE("TensorflowLiteParser_Power") +{ + struct PowerFixture : public ParserFlatbuffersFixture + { + explicit PowerFixture(const std::string & inputShape1, + const std::string & inputShape2, + const std::string & outputShape, + const std::string & activation="NONE") + { + m_JsonString = R"( + { + "version": 3, + "operator_codes": [ { "builtin_code": "POW" } ], + "subgraphs": [ { + "tensors": [ + { + "shape": )" + inputShape1 + R"(, + "type": "UINT8", + "buffer": 0, + "name": "inputTensor1", + "quantization": { + "min": [ 0.0 ], + "max": [ 255.0 ], + "scale": [ 1.0 ], + "zero_point": [ 0 ], + } + }, + { + "shape": )" + inputShape2 + R"(, + "type": "UINT8", + "buffer": 1, + "name": "inputTensor2", + "quantization": { + "min": [ 0.0 ], + "max": [ 255.0 ], + "scale": [ 1.0 ], + "zero_point": [ 0 ], + } + }, + { + "shape": )" + outputShape + R"( , + "type": "UINT8", + "buffer": 2, + "name": "outputTensor", + "quantization": { + "min": [ 0.0 ], + 
"max": [ 255.0 ], + "scale": [ 1.0 ], + "zero_point": [ 0 ], + } + } + ], + "inputs": [ 0, 1 ], + "outputs": [ 2 ], + "operators": [ + { + "opcode_index": 0, + "inputs": [ 0, 1 ], + "outputs": [ 2 ], + "custom_options_format": "FLEXBUFFERS" + } + ], + } ], + "buffers" : [ + { }, + { } + ] + } + )"; + Setup(); + } + }; + + + struct SimplePowerFixture : PowerFixture + { + SimplePowerFixture() : PowerFixture("[ 2, 2 ]", + "[ 2, 2 ]", + "[ 2, 2 ]") {} + }; + + TEST_CASE_FIXTURE(SimplePowerFixture, "SimplePower") + { + RunTest<2, armnn::DataType::QAsymmU8>( + 0, + {{"inputTensor1", { 0, 1, 2, 3 }}, + {"inputTensor2", { 4, 5, 6, 3 }}}, + {{"outputTensor", { 0, 1, 64, 27 }}}); + } + +}
\ No newline at end of file diff --git a/src/armnnTfLiteParser/test/SquaredDifference.cpp b/src/armnnTfLiteParser/test/SquaredDifference.cpp new file mode 100644 index 0000000000..97b4d3fede --- /dev/null +++ b/src/armnnTfLiteParser/test/SquaredDifference.cpp @@ -0,0 +1,101 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ParserFlatbuffersFixture.hpp" + +#include <doctest/doctest.h> + + +TEST_SUITE("TensorflowLiteParser_SquaredDifference") +{ + struct SquaredDifferenceFixture : public ParserFlatbuffersFixture + { + explicit SquaredDifferenceFixture(const std::string & inputShape1, + const std::string & inputShape2, + const std::string & outputShape, + const std::string & activation="NONE") + { + m_JsonString = R"( + { + "version": 3, + "operator_codes": [ { "builtin_code": "SQUARED_DIFFERENCE" } ], + "subgraphs": [ { + "tensors": [ + { + "shape": )" + inputShape1 + R"(, + "type": "UINT8", + "buffer": 0, + "name": "inputTensor1", + "quantization": { + "min": [ 0.0 ], + "max": [ 255.0 ], + "scale": [ 1.0 ], + "zero_point": [ 0 ], + } + }, + { + "shape": )" + inputShape2 + R"(, + "type": "UINT8", + "buffer": 1, + "name": "inputTensor2", + "quantization": { + "min": [ 0.0 ], + "max": [ 255.0 ], + "scale": [ 1.0 ], + "zero_point": [ 0 ], + } + }, + { + "shape": )" + outputShape + R"( , + "type": "UINT8", + "buffer": 2, + "name": "outputTensor", + "quantization": { + "min": [ 0.0 ], + "max": [ 255.0 ], + "scale": [ 1.0 ], + "zero_point": [ 0 ], + } + } + ], + "inputs": [ 0, 1 ], + "outputs": [ 2 ], + "operators": [ + { + "opcode_index": 0, + "inputs": [ 0, 1 ], + "outputs": [ 2 ], + "custom_options_format": "FLEXBUFFERS" + } + ], + } ], + "buffers" : [ + { }, + { } + ] + } + )"; + Setup(); + } + }; + + + struct SimpleSquaredDifferenceFixture : SquaredDifferenceFixture + { + SimpleSquaredDifferenceFixture() : SquaredDifferenceFixture("[ 2, 2 ]", + "[ 2, 2 ]", + "[ 2, 2 ]") {} + }; + + 
TEST_CASE_FIXTURE(SimpleSquaredDifferenceFixture, "SimpleSquaredDifference") + { + RunTest<2, armnn::DataType::QAsymmU8>( + 0, + {{"inputTensor1", { 4, 1, 8, 9 }}, + {"inputTensor2", { 0, 5, 6, 3 }}}, + {{"outputTensor", { 16, 16, 4, 36 }}}); + } + +}
\ No newline at end of file |