author    Mike Kelly <mike.kelly@arm.com>    2021-09-01 21:22:37 +0100
committer Jim Flynn <jim.flynn@arm.com>      2021-09-02 08:37:02 +0000
commit    31dce2b3fa19781836a9a295b514b2ab37f5d928 (patch)
tree      852770e21c4251c91ed80eddc49d2dcbc6047d61
parent    00e9ebf026b1e2f6dbbed201ce1abe0091d6453b (diff)
download  armnn-31dce2b3fa19781836a9a295b514b2ab37f5d928.tar.gz
IVGCVSW-6294 Added support for LRN to TfLiteParser
* Added support for LRN to TfLiteParser

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ia34441a4adeecd1f17c65af047d6c207729703ec
-rw-r--r--  CMakeLists.txt                                                1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp                       46
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp                        1
-rw-r--r--  src/armnnTfLiteParser/test/LocalResponseNormalization.cpp    107
4 files changed, 155 insertions(+), 0 deletions(-)
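Once the parser knows about the operator, a TfLite model containing LRN loads through the normal armnnTfLiteParser entry points. A minimal sketch of that flow follows; the model path "lrn_model.tflite" and the CpuRef backend choice are illustrative assumptions, not part of this patch:

#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    // Parse a TfLite flatbuffer that contains a LOCAL_RESPONSE_NORMALIZATION operator.
    // "lrn_model.tflite" is a placeholder path.
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("lrn_model.tflite");

    // Optimise for the reference backend and load into a runtime as usual.
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}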
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2b0c952547..60753bed3a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -682,6 +682,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/L2Normalization.cpp
src/armnnTfLiteParser/test/LeakyRelu.cpp
src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
+ src/armnnTfLiteParser/test/LocalResponseNormalization.cpp
src/armnnTfLiteParser/test/Maximum.cpp
src/armnnTfLiteParser/test/MaxPool2D.cpp
src/armnnTfLiteParser/test/Mean.cpp
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 2ac325c08c..5c7cb9b0b8 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -657,6 +657,8 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt
m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
+ m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
+ = &TfLiteParserImpl::ParseLocalResponseNormalization;
m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
@@ -3401,6 +3403,50 @@ void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
}
+void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 1);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
+ std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
+
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+
+ const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+ const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
+
+ armnn::NormalizationDescriptor descriptor;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+ descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+ descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
+ descriptor.m_K = options->bias;
+ descriptor.m_Alpha = options->alpha;
+ descriptor.m_Beta = options->beta;
+
+ // ArmNN expects normSize to be the full size of the normalization
+ // window rather than the radius as in TfLite.
+ descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
+
+ IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
+ ARMNN_ASSERT(layer != nullptr);
+
+ TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
{
ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
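The only non-trivial step in ParseLocalResponseNormalization above is translating the TfLite radius into ArmNN's window size: NormalizationDescriptor::m_NormSize is the full extent of the normalization window, while TfLite stores the number of neighbouring channels on each side. A small standalone sketch of that mapping (the helper name is illustrative):

#include <cstdint>

// TfLite's LocalResponseNormalizationOptions carry a radius; ArmNN's
// NormalizationDescriptor::m_NormSize is the full window size.
uint32_t ToArmnnNormSize(int32_t tfLiteRadius)
{
    return 1u + (2u * static_cast<uint32_t>(tfLiteRadius));
}

// Example: the unit test below uses radius = 2, so m_NormSize becomes
// ToArmnnNormSize(2) == 5, i.e. a window of five channels centred on the
// current channel.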
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 0f11e41d37..dcd00d887d 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -131,6 +131,7 @@ private:
void ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex);
void ParseLess(size_t subgraphIndex, size_t operatorIndex);
void ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex);
+ void ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex);
void ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex);
void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
void ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/LocalResponseNormalization.cpp b/src/armnnTfLiteParser/test/LocalResponseNormalization.cpp
new file mode 100644
index 0000000000..5fe6daa582
--- /dev/null
+++ b/src/armnnTfLiteParser/test/LocalResponseNormalization.cpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersFixture.hpp"
+
+TEST_SUITE("TensorflowLiteParser_LRN")
+{
+struct LRNFixture : public ParserFlatbuffersFixture
+{
+ explicit LRNFixture(std::string inputdim, std::string outputdim, std::string dataType)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "LOCAL_RESPONSE_NORMALIZATION" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": )"
+ + outputdim
+ + R"(,
+ "type": )"
+ + dataType
+ + R"(,
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": )"
+ + inputdim
+ + R"(,
+ "type": )"
+ + dataType
+ + R"(,
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "builtin_options_type": "LocalResponseNormalizationOptions",
+ "builtin_options":
+ {
+ "radius": 2,
+ "bias": 1.0,
+ "alpha": 1.0,
+ "beta": 0.5
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ }
+ ],
+ "description": "MaxPool2D test.",
+ "buffers" : [ {}, {} ]
+ })";
+
+ SetupSingleInputSingleOutput("InputTensor", "OutputTensor");
+ }
+};
+
+struct LRNLiteFixtureFloat4DOutput : LRNFixture
+{
+ LRNLiteFixtureFloat4DOutput() : LRNFixture("[ 1, 1, 4, 4 ]", "[ 1, 1, 4, 4 ]", "FLOAT32") {}
+};
+
+TEST_CASE_FIXTURE(LRNLiteFixtureFloat4DOutput, "LRNLiteFloat4DOutput")
+{
+ RunTest<4, armnn::DataType::Float32>(0,
+ {
+ 2.0f, 3.0f, 5.0f, 2.0f,
+ 2.0f, 3.0f, 5.0f, 2.0f,
+ 2.0f, 3.0f, 5.0f, 2.0f,
+ 2.0f, 3.0f, 5.0f, 2.0f
+ },
+ {
+ 0.320256f, 0.457496f, 0.762493f, 0.320256f,
+ 0.320256f, 0.457496f, 0.762493f, 0.320256f,
+ 0.320256f, 0.457496f, 0.762493f, 0.320256f,
+ 0.320256f, 0.457496f, 0.762493f, 0.320256f
+ });
+}
+
+TEST_CASE_FIXTURE(LRNLiteFixtureFloat4DOutput, "LRNIncorrectDataTypeError")
+{
+ CHECK_THROWS_AS((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+}
+
+}
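For reference, the expected tensor in LRNLiteFloat4DOutput follows the TensorFlow/TfLite LRN definition, out[c] = in[c] / (bias + alpha * sum of in^2 over the window)^beta, applied across the four channels with radius 2, bias 1.0, alpha 1.0 and beta 0.5. A standalone sketch that reproduces one row of the expected output (the function name is illustrative):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Reference LRN across channels: out[c] = in[c] / (bias + alpha * sqrSum)^beta,
// where sqrSum is the sum of squares over channels [c - radius, c + radius].
std::vector<float> LrnAcrossChannels(const std::vector<float>& in, int radius,
                                     float bias, float alpha, float beta)
{
    const int channels = static_cast<int>(in.size());
    std::vector<float> out(in.size());
    for (int c = 0; c < channels; ++c)
    {
        float sqrSum = 0.0f;
        for (int i = std::max(0, c - radius); i <= std::min(channels - 1, c + radius); ++i)
        {
            sqrSum += in[i] * in[i];
        }
        out[c] = in[c] / std::pow(bias + alpha * sqrSum, beta);
    }
    return out;
}

int main()
{
    // One spatial position of the fixture's NHWC input: channels { 2, 3, 5, 2 }.
    const auto out = LrnAcrossChannels({ 2.0f, 3.0f, 5.0f, 2.0f }, 2, 1.0f, 1.0f, 0.5f);
    // Prints approximately 0.320256 0.457496 0.762493 0.320256,
    // matching one row of the expected tensor in the test case.
    for (float v : out)
    {
        std::printf("%f ", v);
    }
    std::printf("\n");
    return 0;
}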