author      Bruno Goncalves <bruno.slackware@gmail.com>    2019-02-07 18:40:11 -0200
committer   derek.lamberti <derek.lamberti@arm.com>        2019-02-22 15:08:44 +0000
commit      3f58ddb30362a927f3ff63c59545382bef77c25f (patch)
tree        37b46e1f20b250c332d300a9d321053064471e0d
parent      bbeae26a881af1e4f6690c45b2efbe2bef14c5cc (diff)
download    armnn-3f58ddb30362a927f3ff63c59545382bef77c25f.tar.gz
Add resize-bilinear parser to tf-lite
Change-Id: Id35db981b38348e5a941cfbb4cbdfe8cd617a254
Signed-off-by: Bruno Goncalves <bruno.slackware@gmail.com>
-rw-r--r--  CMakeLists.txt                                   1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp          37
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp           1
-rw-r--r--  src/armnnTfLiteParser/test/ResizeBilinear.cpp  118
4 files changed, 157 insertions, 0 deletions
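
The change registers a handler for the TfLite RESIZE_BILINEAR builtin: the parser reads the target size from the operator's second (INT32) input tensor and adds an Arm NN ResizeBilinear layer with NHWC data layout. For orientation, a minimal sketch of how such a model would then be consumed through the public parser API; the file name "model.tflite" and input tensor name "input" are hypothetical placeholders, not part of this change.

    // Sketch: load a .tflite model containing RESIZE_BILINEAR via the public parser API.
    #include <armnnTfLiteParser/ITfLiteParser.hpp>
    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnnTfLiteParser;

        // Create the parser and build an armnn::INetwork from the flatbuffer file.
        ITfLiteParserPtr parser = ITfLiteParser::Create();
        armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");

        // Binding info for a network input of subgraph 0 (tensor name is model-specific).
        BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");

        // From here the network can be optimized and loaded onto a backend
        // with armnn::Optimize() and armnn::IRuntime as usual.
        (void)inputBinding;
        return 0;
    }
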
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0492145a8a..4455e6df21 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -553,6 +553,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Multiplication.cpp
src/armnnTfLiteParser/test/Pad.cpp
src/armnnTfLiteParser/test/Reshape.cpp
+ src/armnnTfLiteParser/test/ResizeBilinear.cpp
src/armnnTfLiteParser/test/Softmax.cpp
src/armnnTfLiteParser/test/Sub.cpp
src/armnnTfLiteParser/test/Squeeze.cpp
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 7ea85bb48e..4acd30805e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -429,6 +429,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
+ m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
@@ -1359,6 +1360,42 @@ void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 2);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
+
+ // Data for the parsed tensor args (size) must be stored locally.
+ std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
+
+ BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+ ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
+
+ ResizeBilinearDescriptor desc;
+ desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
+ desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
+ desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+ auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
+ IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
+
+ TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
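
For reference, the graph that ParseResizeBilinear produces corresponds to what one would build by hand against the armnn::INetwork API. A minimal sketch for a 1x3x3x1 -> 1x5x5x1 NHWC resize follows (layer and tensor names are illustrative, not taken from the diff).

    #include <armnn/ArmNN.hpp>

    armnn::INetworkPtr BuildResizeBilinearNetwork()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // Descriptor fields mirror what the parser extracts from the INT32 size
        // tensor (inputs[1]): [targetHeight, targetWidth], NHWC layout.
        ResizeBilinearDescriptor desc;
        desc.m_TargetHeight = 5;
        desc.m_TargetWidth  = 5;
        desc.m_DataLayout   = DataLayout::NHWC;

        IConnectableLayer* input  = network->AddInputLayer(0, "input");
        IConnectableLayer* resize = network->AddResizeBilinearLayer(desc, "ResizeBilinear:0:0");
        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 3, 3, 1 }, DataType::Float32));
        resize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 5, 5, 1 }, DataType::Float32));

        input->GetOutputSlot(0).Connect(resize->GetInputSlot(0));
        resize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return network;
    }
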
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 7d8151d722..599923244e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -102,6 +102,7 @@ private:
void ParseRelu(size_t subgraphIndex, size_t operatorIndex);
void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
+ void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
void ParseSub(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/ResizeBilinear.cpp b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
new file mode 100644
index 0000000000..400dc78b67
--- /dev/null
+++ b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
@@ -0,0 +1,118 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+using armnnTfLiteParser::TfLiteParser;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct ResizeBilinearFixture : public ParserFlatbuffersFixture
+{
+ explicit ResizeBilinearFixture(const std::string & inputShape,
+ const std::string & outputShape,
+ const std::string & sizeShape,
+ const std::string & sizeData)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "RESIZE_BILINEAR" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + sizeShape + R"( ,
+ "type": "INT32",
+ "buffer": 0,
+ "name": "sizeTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"( ,
+ "type": "FLOAT32",
+ "buffer": 2,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 2 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 1, 0 ],
+ "outputs": [ 2 ],
+ "builtin_options_type": "ResizeBilinearOptions",
+ "builtin_options": {
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { "data": )" + sizeData + R"(, },
+ { },
+ { },
+ ]
+ }
+ )";
+ Setup();
+ }
+};
+
+
+struct SimpleResizeBilinearFixture : ResizeBilinearFixture
+{
+ SimpleResizeBilinearFixture()
+ : ResizeBilinearFixture("[ 1, 3, 3, 1 ]", // inputShape
+ "[ 1, 5, 5, 1 ]", // outputShape
+ "[ 2 ]", // sizeShape
+ "[ 5,0,0,0, 5,0,0,0 ]") // sizeData
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, SimpleResizeBilinearFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(
+ 0,
+ {{"InputTensor", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f }}},
+ {{"OutputTensor", { 0.0f, 0.6f, 1.2f, 1.8f, 2.0f,
+ 1.8f, 2.4f, 3.0f, 3.6f, 3.8f,
+ 3.6f, 4.2f, 4.8f, 5.4f, 5.6f,
+ 5.4f, 6.0f, 6.6f, 7.2f, 7.4f,
+ 6.0f, 6.6f, 7.2f, 7.8f, 8.0f }}}
+ );
+}
+
+BOOST_AUTO_TEST_SUITE_END()
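
The expected values in the test above are consistent with the usual resize-bilinear mapping srcCoord = dstCoord * (inputSize / outputSize), with source coordinates clamped at the border (no align-corners adjustment). A standalone sketch, not part of the commit, that recomputes the 5x5 output from the 3x3 input under that assumption:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const unsigned int inH = 3, inW = 3, outH = 5, outW = 5;
        const float input[inH][inW] = { { 0.f, 1.f, 2.f },
                                        { 3.f, 4.f, 5.f },
                                        { 6.f, 7.f, 8.f } };
        const float scaleY = static_cast<float>(inH) / outH; // 0.6
        const float scaleX = static_cast<float>(inW) / outW; // 0.6

        for (unsigned int oy = 0; oy < outH; ++oy)
        {
            for (unsigned int ox = 0; ox < outW; ++ox)
            {
                const float sy = oy * scaleY;
                const float sx = ox * scaleX;
                const unsigned int y0 = static_cast<unsigned int>(std::floor(sy));
                const unsigned int x0 = static_cast<unsigned int>(std::floor(sx));
                const unsigned int y1 = std::min(y0 + 1, inH - 1);
                const unsigned int x1 = std::min(x0 + 1, inW - 1);
                const float fy = sy - y0;
                const float fx = sx - x0;

                // Weighted average of the four neighbouring input pixels.
                const float value =
                    (1 - fy) * (1 - fx) * input[y0][x0] + (1 - fy) * fx * input[y0][x1] +
                    fy       * (1 - fx) * input[y1][x0] + fy       * fx * input[y1][x1];

                std::printf("%.1f%s", value, ox + 1 == outW ? "\n" : ", ");
            }
        }
        return 0;
    }

Row 0, for example, comes out as 0.0, 0.6, 1.2, 1.8, 2.0: the last column maps to source x = 2.4, whose upper neighbour is clamped back to column 2, matching the test's expected output.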