author     Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>   2018-10-17 15:19:14 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>                2018-10-22 16:57:54 +0100
commit     b66504b654a85c81557c5d003fb5f0d6808f1482 (patch)
tree       c81597f36e560f87a8401a72a3617c45aa1bdb3b
parent     3dc4303c94cf3f5976e495233f663ff56089e53a (diff)
download   armnn-b66504b654a85c81557c5d003fb5f0d6808f1482.tar.gz
IVGCVSW-2030 and IVGCVSW-2031 Add MaxPooling support and unit test to TfLite Parser
Change-Id: I3aea8ea6d018900682d278f28a50e40cf2f963fe
-rw-r--r--  CMakeLists.txt                                1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp      142
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp        4
-rw-r--r--  src/armnnTfLiteParser/test/MaxPool2D.cpp    119
4 files changed, 209 insertions, 57 deletions
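
For context, a minimal sketch of how a client might exercise the new MAX_POOL_2D support through the public parser API. The model path and tensor names here are hypothetical, and the runtime/optimization steps are omitted; this is illustrative only, not part of the patch.

    #include "armnnTfLiteParser/ITfLiteParser.hpp"
    #include "armnn/INetwork.hpp"

    // A minimal sketch, assuming a model file "maxpool.tflite" whose single
    // subgraph contains a MAX_POOL_2D operator (path and tensor names are hypothetical).
    void ParseMaxPoolModel()
    {
        armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();

        // MAX_POOL_2D is dispatched to ParseMaxPool2D, which with this change
        // delegates to the shared ParsePool helper.
        armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("maxpool.tflite");

        // Look up input/output binding info for subgraph 0 by tensor name.
        auto inputBinding  = parser->GetNetworkInputBindingInfo(0, "InputTensor");
        auto outputBinding = parser->GetNetworkOutputBindingInfo(0, "OutputTensor");

        // The returned INetwork can then be optimized and loaded into an
        // armnn::IRuntime as usual (omitted here).
    }
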
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8182c22dc8..e312b36b77 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -413,6 +413,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Concatenation.cpp
src/armnnTfLiteParser/test/Conv2D.cpp
src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+ src/armnnTfLiteParser/test/MaxPool2D.cpp
src/armnnTfLiteParser/test/Reshape.cpp
src/armnnTfLiteParser/test/Softmax.cpp
src/armnnTfLiteParser/test/Squeeze.cpp
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 66746e488b..216c09014c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -456,6 +456,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
+ m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
@@ -647,63 +648,6 @@ void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operato
CHECK_LOCATION().AsString()));
}
-void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
-{
- CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-
- const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
- const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
-
- CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
-
- Pooling2dDescriptor desc;
-
- desc.m_PoolType = PoolingAlgorithm::Average;
- desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
- desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
- desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
- desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
- desc.m_PaddingMethod = PaddingMethod::Exclude;
- desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
-
- auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
- CHECK_VALID_SIZE(inputs.size(), 1);
- armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
-
- // assuming input is NHWC
- unsigned int inputHeight = inputTensorInfo.GetShape()[1];
- unsigned int inputWidth = inputTensorInfo.GetShape()[2];
-
- CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
- CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
-
- auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
- CHECK_VALID_SIZE(outputs.size(), 1);
- armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
-
- auto layerName = boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
- IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
-
- BOOST_ASSERT(layer != nullptr);
-
- // add permute layers to swizzle the input and deswizzle the output
- std::pair<IConnectableLayer*, IConnectableLayer*> permuteLayers =
- SwizzleInDeswizzleOut(*m_Network, layer, 0, inputTensorInfo, 0, outputTensorInfo);
-
- // register the input connection slots for the layer, connections are made after all layers have been created
- // only the tensors for the inputs are relevant, exclude the const tensors
- auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterInputSlots(subgraphIndex, operatorIndex, permuteLayers.first, {inputTensorIndexes[0]});
-
- // we need to add the activation layer and fortunately we don't need to care about the data layout
- // beause the activation function is element-wise, so it is OK to have the activation after the trailing
- // swizzle layer
- layer = AddFusedActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
- // register the output connection slots for the layer, connections are made after all layers have been created
- auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
-}
-
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -857,6 +801,90 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
+{
+ ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
+}
+
+void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
+{
+ ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
+}
+
+void TfLiteParser::ParsePool(size_t subgraphIndex,
+ size_t operatorIndex,
+ PoolingAlgorithm algorithm)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+ const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
+
+ CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
+
+ std::string layerName;
+
+ switch (algorithm)
+ {
+ case PoolingAlgorithm::Average:
+ layerName =
+ boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
+ break;
+ case PoolingAlgorithm::Max:
+ layerName =
+ boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
+ break;
+ default:
+ BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
+ }
+
+ Pooling2dDescriptor desc;
+
+ desc.m_PoolType = algorithm;
+ desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
+ desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
+ desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
+ desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
+ desc.m_PaddingMethod = PaddingMethod::Exclude;
+ desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 1);
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+
+ // assuming input is NHWC
+ unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
+ CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+
+ IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
+
+ BOOST_ASSERT(layer != nullptr);
+
+ // add permute layers to swizzle the input and deswizzle the output
+ std::pair<IConnectableLayer*, IConnectableLayer*> permuteLayers =
+ SwizzleInDeswizzleOut(*m_Network, layer, 0, inputTensorInfo, 0, outputTensorInfo);
+
+ // register the input connection slots for the layer, connections are made after all layers have been created
+ // only the tensors for the inputs are relevant, exclude the const tensors
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, permuteLayers.first, {inputTensorIndexes[0]});
+
+ // we need to add the activation layer and fortunately we don't need to care about the data layout
+ // because the activation function is element-wise, so it is OK to have the activation after the trailing
+ // swizzle layer
+ layer = AddFusedActivationLayer(permuteLayers.second, 0, options->fused_activation_function);
+ // register the output connection slots for the layer, connections are made after all layers have been created
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 620648a0c3..35f0b64419 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -6,6 +6,7 @@
#include "armnn/INetwork.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include "armnn/Types.hpp"
#include <schema_generated.h>
#include <functional>
@@ -93,12 +94,15 @@ private:
void ParseConcatenation(size_t subgraphIndex, size_t operatorIndex);
void ParseConv2D(size_t subgraphIndex, size_t operatorIndex);
void ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex);
+ void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex);
void ParseRelu(size_t subgraphIndex, size_t operatorIndex);
void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
+ void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
+
void RegisterProducerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IOutputSlot* slot);
void RegisterConsumerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IInputSlot* slot);
void RegisterInputSlots(size_t subgraphIndex,
diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp
new file mode 100644
index 0000000000..06bf7806cc
--- /dev/null
+++ b/src/armnnTfLiteParser/test/MaxPool2D.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <boost/test/unit_test.hpp>
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include "ParserFlatbuffersFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct MaxPool2DFixture : public ParserFlatbuffersFixture
+{
+ explicit MaxPool2DFixture(std::string inputdim, std::string outputdim, std::string dataType)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "MAX_POOL_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": )"
+ + outputdim
+ + R"(,
+ "type": )"
+ + dataType
+ + R"(,
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": )"
+ + inputdim
+ + R"(,
+ "type": )"
+ + dataType
+ + R"(,
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ }
+ ],
+ "description": "MaxPool2D test.",
+ "buffers" : [ {}, {} ]
+ })";
+
+ SetupSingleInputSingleOutput("InputTensor", "OutputTensor");
+ }
+};
+
+
+struct MaxPoolLiteFixtureUint1DOutput : MaxPool2DFixture
+{
+ MaxPoolLiteFixtureUint1DOutput() : MaxPool2DFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]", "UINT8") {}
+};
+
+struct MaxPoolLiteFixtureFloat1DOutput : MaxPool2DFixture
+{
+ MaxPoolLiteFixtureFloat1DOutput() : MaxPool2DFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]", "FLOAT32") {}
+};
+
+struct MaxPoolLiteFixtureUint2DOutput : MaxPool2DFixture
+{
+ MaxPoolLiteFixtureUint2DOutput() : MaxPool2DFixture("[ 1, 4, 4, 1 ]", "[ 1, 2, 2, 1 ]", "UINT8") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
+{
+ RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 });
+}
+
+BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
+{
+ RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
+}
+
+BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
+{
+ RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
+}
+
+BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
+{
+ BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+}
+
+BOOST_AUTO_TEST_SUITE_END()