author    Sadik Armagan <sadik.armagan@arm.com>          2018-12-27 11:23:44 +0000
committer Saoirse Stewart Arm <saoirse.stewart@arm.com>  2019-01-07 10:42:57 +0000
commit    2ad6cb486164ff3aabe4e9ecabc47f08da48da35 (patch)
tree      57ad464aa77179d9e93d7e0c26830d67464667a6
parent    747ef82c88f9afe14a8b80b6b3b34118353e97f2 (diff)
download  armnn-2ad6cb486164ff3aabe4e9ecabc47f08da48da35.tar.gz
IVGCVSW-2384 Add Split parser function to TensorFlow parser

* Added unit test
* Updated TensorFlowSupport.md file

Change-Id: I5f07de5e91ffb681c0ad17c7c73ee0326e7f1e0a
-rw-r--r--              CMakeLists.txt                              1
-rw-r--r--              src/armnnTfParser/TensorFlowSupport.md      4
-rwxr-xr-x[-rw-r--r--]  src/armnnTfParser/TfParser.cpp            104
-rw-r--r--              src/armnnTfParser/TfParser.hpp              1
-rw-r--r--              src/armnnTfParser/test/Split.cpp          114
5 files changed, 224 insertions, 0 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 22725aeef4..a00a44a6ce 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -421,6 +421,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfParser/test/FullyConnected.cpp
src/armnnTfParser/test/Constant.cpp
src/armnnTfParser/test/TestMultiInputsOutputs.cpp
+ src/armnnTfParser/test/Split.cpp
src/armnnTfParser/test/Squeeze.cpp
src/armnnTfParser/test/Sub.cpp)
endif()
diff --git a/src/armnnTfParser/TensorFlowSupport.md b/src/armnnTfParser/TensorFlowSupport.md
index 59510d02a8..edcf409cb8 100644
--- a/src/armnnTfParser/TensorFlowSupport.md
+++ b/src/armnnTfParser/TensorFlowSupport.md
@@ -108,6 +108,10 @@ The parser only supports `ResizeMethod.BILINEAR` with `align_corners=False`. See
The parser only supports 2D inputs and does not support selecting the `softmax` dimension. See the TensorFlow [softmax documentation](https://www.tensorflow.org/api_docs/python/tf/nn/softmax) for more information.
+**split**
+
+Arm NN supports split along the channel dimension for data formats NHWC and NCHW.
+
**maximum**
where maximum is used in one of the following ways
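Note: the documentation change above states that Arm NN only supports split along the channel dimension, which is axis 3 for NHWC and axis 1 for NCHW. As a rough illustration of that restriction, here is a minimal standalone C++ sketch; IsSupportedSplitAxis is a hypothetical helper for illustration only and is not part of the armnn code base.

// Minimal standalone sketch (not part of the commit): illustrates the axis
// restriction described in TensorFlowSupport.md above. Axis 3 is the channel
// dimension for NHWC, axis 1 for NCHW; axes 0 and 2 are rejected.
#include <cstdint>
#include <iostream>

bool IsSupportedSplitAxis(uint32_t axis)
{
    // Only the channel dimension of NHWC (3) or NCHW (1) is accepted.
    return axis == 1u || axis == 3u;
}

int main()
{
    for (uint32_t axis = 0; axis < 4; ++axis)
    {
        std::cout << "axis " << axis << ": "
                  << (IsSupportedSplitAxis(axis) ? "supported" : "not supported")
                  << std::endl;
    }
    return 0;
}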
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 7a213c0909..2d31842205 100644..100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -350,6 +350,7 @@ const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_Ope
{ "Sigmoid", &TfParser::ParseSigmoid },
{ "Softmax", &TfParser::ParseSoftmax },
{ "Softplus", &TfParser::ParseSoftplus },
+ { "Split", &TfParser::ParseSplit },
{ "Tanh", &TfParser::ParseTanh },
{ "MaxPool", &TfParser::ParseMaxPool },
{ "AvgPool", &TfParser::ParseAvgPool },
@@ -2461,6 +2462,109 @@ ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
+ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
+ const tensorflow::GraphDef& graphDef)
+{
+ boost::ignore_unused(graphDef);
+
+ std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
+ unsigned int numInputs = static_cast<unsigned int>(nodes.size());
+ std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
+
+ // The last input is the axis for split operation.
+ if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports split with constant axis. "
+ "Input %1%. Node %2% %3%")
+ % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+ ParsedConstTfOperation<int32_t>* shapeNode =
+ boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);
+
+ // Get the axis tensor data
+ std::vector<int32_t> axisTensorData;
+ shapeNode->GetConstTensor(axisTensorData);
+
+    // The split dimension indicates the data format: 3 is NHWC, 1 is NCHW.
+ const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
+
+ // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
+ if (splitDim == 0 || splitDim == 2)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Dimension %1% for split is not supported by Armnn. "
+ "Node %2% %3%")
+ % splitDim
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+
+    // As Armnn only supports splitter outputs of the same shape, num_splits is limited to an integer.
+ uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_or_size_splits");
+
+ IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
+ TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
+
+ if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
+ {
+ throw armnn::ParseException(
+ boost::str(
+ boost::format(
+ "The number of dimensions: %1% for input tensors of the "
+ "splitter op should be %2% %3%")
+ % inputTensorInfo.GetNumDimensions()
+ % MaxNumOfTensorDimensions
+ % CHECK_LOCATION().AsString()));
+ }
+ auto inputDimSize = inputTensorInfo.GetNumDimensions();
+
+ std::vector<unsigned int> splitterDimSizes(inputDimSize);
+
+ // Add current input shape to splitterDimSizes
+ for (unsigned int i = 0; i < inputDimSize; ++i)
+ {
+ splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
+ }
+
+ if (splitterDimSizes[splitDim] % num_split != 0)
+ {
+ throw ParseException("Number of splits must evenly divide the dimension");
+ }
+ splitterDimSizes[splitDim] /= num_split;
+
+ SplitterDescriptor splitDesc(num_split);
+ for (unsigned int g = 0; g < num_split; ++g)
+ {
+ // Set the size of the views.
+ for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
+ {
+ splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
+ }
+ splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
+ }
+
+ IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
+
+ inputSlot.Connect(layer->GetInputSlot(0));
+
+ TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
+ splitterDimSizes.data());
+
+ for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
+ {
+ layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
+ }
+
+ return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
+}
+
ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
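Note: the ParseSplit implementation above gives every view the input shape with the split dimension divided by num_split, and offsets each view's origin along that dimension only. The following standalone C++ sketch reproduces that arithmetic outside the parser; View and ComputeSplitViews are hypothetical names introduced here for illustration and are not part of the armnn API.

// Standalone sketch (hypothetical helpers, not the armnn API): reproduces the
// view-size / view-origin arithmetic used by ParseSplit above.
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

struct View
{
    std::vector<uint32_t> origin; // start coordinate of this view in the input
    std::vector<uint32_t> size;   // extent of this view in each dimension
};

std::vector<View> ComputeSplitViews(std::vector<uint32_t> inputShape,
                                    uint32_t splitDim,
                                    uint32_t numSplit)
{
    if (inputShape[splitDim] % numSplit != 0)
    {
        throw std::invalid_argument("Number of splits must evenly divide the dimension");
    }

    // Every view has the same shape: the input shape with the split
    // dimension divided by the number of splits.
    std::vector<uint32_t> viewSize = inputShape;
    viewSize[splitDim] /= numSplit;

    std::vector<View> views(numSplit);
    for (uint32_t g = 0; g < numSplit; ++g)
    {
        views[g].size = viewSize;
        views[g].origin.assign(inputShape.size(), 0u);
        // Consecutive views are offset along the split dimension only.
        views[g].origin[splitDim] = viewSize[splitDim] * g;
    }
    return views;
}

int main()
{
    // Mirrors the unit test below: input shape {1, 2, 2, 2}, axis 1, two splits.
    const auto views = ComputeSplitViews({1, 2, 2, 2}, 1, 2);
    for (const auto& v : views)
    {
        std::cout << "origin[1]=" << v.origin[1]
                  << " size={" << v.size[0] << "," << v.size[1] << ","
                  << v.size[2] << "," << v.size[3] << "}\n";
    }
    return 0;
}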
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index 20c5233c8e..b8fab41c74 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -152,6 +152,7 @@ private:
ParsedTfOperationPtr ParseSigmoid(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseSoftmax(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseSoftplus(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
+ ParsedTfOperationPtr ParseSplit(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseMaxPool(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseAvgPool(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
diff --git a/src/armnnTfParser/test/Split.cpp b/src/armnnTfParser/test/Split.cpp
new file mode 100644
index 0000000000..de6b5d861e
--- /dev/null
+++ b/src/armnnTfParser/test/Split.cpp
@@ -0,0 +1,114 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct SplitFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ SplitFixture() {
+ m_Prototext =
+ "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " node {"
+ " name: \"splitInput\" \n"
+ " op: \"Const\" \n"
+ "attr {\n"
+ " key: \"dtype\" \n"
+ " value {"
+ " type: DT_INT32"
+ " }"
+ "}"
+ "attr {"
+ " key: \"value\"\n"
+ " value { "
+ " tensor {"
+ " dtype: DT_INT32"
+ " tensor_shape {"
+ "}"
+ "int_val: 1"
+ "}"
+ "}"
+ "}"
+ "}"
+ "node { \n"
+ " name: \"Split\" \n"
+ " op: \"Split\" \n"
+ "input: \"graphInput\"\n"
+ "input: \"splitInput\"\n"
+ "attr { \n "
+ "key: \"T\"\n"
+ "value {\n"
+ "type: DT_FLOAT\n"
+ " }\n"
+ "}\n"
+ "\n"
+ " attr { \n"
+ " key: \"num_or_size_splits\" \n"
+ " value { \n"
+ " i:2 \n "
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ "name: \"Relu_1\"\n"
+ "op: \"Relu\"\n"
+ "input: \"Split:0\"\n"
+ "attr { \n "
+ "key: \"T\"\n"
+ "value {\n"
+ "type: DT_FLOAT\n"
+ " }\n"
+ "}\n"
+ "}\n"
+ "node { \n"
+ "name: \"Relu_2\"\n"
+ "op: \"Relu\"\n"
+ "input: \"Split:1\"\n"
+ "attr { \n "
+ "key: \"T\"\n"
+ "value {\n"
+ "type: DT_FLOAT\n"
+ " }\n"
+ "}\n"
+ "}\n";
+
+ Setup( { { "graphInput", { 1, 2, 2 , 2} } },
+ { "Relu_1", "Relu_2" });
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwo, SplitFixture)
+{
+ BOOST_TEST(
+ (m_Parser->GetNetworkOutputBindingInfo("Relu_1").second.GetShape() == armnn::TensorShape({ 1, 1, 2, 2 })));
+
+ BOOST_TEST(
+ (m_Parser->GetNetworkOutputBindingInfo("Relu_2").second.GetShape() == armnn::TensorShape({ 1, 1, 2, 2 })));
+
+ RunTest<4>({ { "graphInput", { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f, 1.75f } } },
+ { { "Relu_1", { 0.0f, 0.0f, 1.25f, 0.0f } },
+ { "Relu_2", { 0.0f, 0.5f, 0.0f, 1.75f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
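Note: the test case above splits a { 1, 2, 2, 2 } input along axis 1 into two { 1, 1, 2, 2 } halves and applies ReLU to each. As a sanity check on the expected values, the following standalone sketch (not the ParserPrototxtFixture harness) derives them by splitting the flat input buffer in half and clamping negatives to zero.

// Standalone sketch (not the test harness): reproduces the expected outputs of
// ParseAxisOneSplitTwo by halving the flat input buffer and applying ReLU.
#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    // Input tensor of shape {1, 2, 2, 2}, laid out contiguously; splitting
    // along axis 1 into two equal parts simply halves the flat buffer.
    const std::vector<float> input = { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f, 1.75f };

    std::vector<float> relu1(input.begin(), input.begin() + 4);
    std::vector<float> relu2(input.begin() + 4, input.end());

    auto relu = [](std::vector<float>& v)
    {
        std::transform(v.begin(), v.end(), v.begin(),
                       [](float x) { return std::max(x, 0.0f); });
    };
    relu(relu1); // expected { 0.0f, 0.0f, 1.25f, 0.0f }
    relu(relu2); // expected { 0.0f, 0.5f, 0.0f, 1.75f }

    for (float x : relu1) { std::cout << x << " "; }
    std::cout << "\n";
    for (float x : relu2) { std::cout << x << " "; }
    std::cout << "\n";
    return 0;
}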