author     Sadik Armagan <sadik.armagan@arm.com>    2020-02-18 15:18:27 +0000
committer  James Conroy <james.conroy@arm.com>      2020-02-20 14:14:01 +0000
commit     c02d4411b6bf9d755e25a1593c9fe16e7d93901e (patch)
tree       cd354f9b1294087777cb43a891f0d099c179e39a
parent     5a0b111c5ba010d87dc535d15b454b062cb50ced (diff)
download   armnn-c02d4411b6bf9d755e25a1593c9fe16e7d93901e.tar.gz
IVGCVSW-2232 Pack/Stack operator support in Tensorflow Parser
* Parser support and unit tests added for Stack operator in TfParser

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I243b33fda2762180ef880458e760a54fde64f3d8
-rw-r--r--   CMakeLists.txt                           |   1
-rw-r--r--   src/armnnTfParser/TensorFlowSupport.md   |   4
-rwxr-xr-x   src/armnnTfParser/TfParser.cpp           |  95
-rw-r--r--   src/armnnTfParser/TfParser.hpp           |   1
-rw-r--r--   src/armnnTfParser/test/Stack.cpp         | 174
5 files changed, 274 insertions(+), 1 deletion(-)
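
For orientation, here is a minimal sketch of how the new Pack/Stack support could be exercised through the public armnnTfParser API. The graph file name is a placeholder, and the tensor names and shapes are borrowed from the unit tests added in this patch; none of this code is part of the change itself.

// Hypothetical usage sketch: parse a TensorFlow GraphDef (text format) that
// contains a Stack/Pack node and build an Arm NN network from it.
// "stack_graph.pbtxt" and the tensor names/shapes are placeholders.
#include "armnnTfParser/ITfParser.hpp"

#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

#include <map>
#include <string>
#include <vector>

armnn::INetworkPtr ParseGraphWithStack()
{
    armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();

    // Two 3x2x3 placeholders feeding a Stack node named "output",
    // mirroring the prototxt used by the unit tests below.
    std::map<std::string, armnn::TensorShape> inputShapes =
        { { "input0", armnn::TensorShape({ 3, 2, 3 }) },
          { "input1", armnn::TensorShape({ 3, 2, 3 }) } };
    std::vector<std::string> requestedOutputs = { "output" };

    return parser->CreateNetworkFromTextFile("stack_graph.pbtxt",
                                             inputShapes,
                                             requestedOutputs);
}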
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7ce9c42801..e5876db926 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -710,6 +710,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfParser/test/TestMultiInputsOutputs.cpp
src/armnnTfParser/test/Split.cpp
src/armnnTfParser/test/Squeeze.cpp
+ src/armnnTfParser/test/Stack.cpp
src/armnnTfParser/test/Sub.cpp
src/armnnTfParser/test/StridedSlice.cpp
)
diff --git a/src/armnnTfParser/TensorFlowSupport.md b/src/armnnTfParser/TensorFlowSupport.md
index 886dc7df1d..343d7a5773 100644
--- a/src/armnnTfParser/TensorFlowSupport.md
+++ b/src/armnnTfParser/TensorFlowSupport.md
@@ -163,6 +163,10 @@ Arm NN supports split along the channel dimension for data formats NHWC and NCHW
The parser does not support all forms of broadcasting [broadcast composition](https://www.tensorflow.org/performance/xla/broadcasting), only broadcasting of scalars and 1D tensors. See the TensorFlow [subtract documentation](https://www.tensorflow.org/api_docs/python/tf/math/subtract) for more information.
+**stack**
+
+See the TensorFlow [stack documentation](https://www.tensorflow.org/api_docs/python/tf/stack) for more information.
+
## Tested networks
Arm tests these operators with the following TensorFlow fp32 neural networks:
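
For reference, the parser lowers a TensorFlow stack/pack node onto Arm NN's existing Stack layer. A minimal sketch of that mapping through the public INetwork API follows; the layer name, shapes and data type are illustrative only, not taken from the patch.

// Illustrative sketch of the Arm NN layer a TF Stack/Pack node is lowered to.
// TfParser::ParseStack (added below) performs this wiring from the parsed
// NodeDef; the concrete values here are placeholders.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

void AddStackToNetwork(armnn::INetwork& network,
                       armnn::IOutputSlot& input0,
                       armnn::IOutputSlot& input1)
{
    armnn::StackDescriptor descriptor;
    descriptor.m_Axis       = 3;                                // stack along a new trailing axis
    descriptor.m_NumInputs  = 2;
    descriptor.m_InputShape = armnn::TensorShape({ 3, 2, 3 });  // shape of each input

    armnn::IConnectableLayer* stackLayer = network.AddStackLayer(descriptor, "stack");
    input0.Connect(stackLayer->GetInputSlot(0));
    input1.Connect(stackLayer->GetInputSlot(1));

    // Output shape is the input shape with m_NumInputs inserted at m_Axis: { 3, 2, 3, 2 }.
    stackLayer->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo(armnn::TensorShape({ 3, 2, 3, 2 }), armnn::DataType::Float32));
}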
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index d65af2365b..b5a421145a 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -376,7 +376,9 @@ const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_Ope
{ "Minimum", &TfParser::ParseMinimum },
{ "Equal", &TfParser::ParseEqual },
{ "Pad", &TfParser::ParsePad },
- { "Sub", &TfParser::ParseSub }
+ { "Sub", &TfParser::ParseSub },
+ { "Pack" , &TfParser::ParseStack },
+ { "Stack", &TfParser::ParseStack }
};
const std::list<std::string> TfParser::m_ControlInputs = {
@@ -1961,6 +1963,97 @@ ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, cons
return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
+ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+{
+ boost::ignore_unused(graphDef);
+ std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
+
+ unsigned int numInputs = static_cast<unsigned int>(nodes.size());
+ if (numInputs < 1)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Pack/Stack expects at least one input. Got %1% for Node %2% %3%")
+ % numInputs
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+
+ std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
+ // Use the tensor shape of the first input as the "correct" input shape in the descriptor
+ IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
+ const TensorInfo& inputTensorInfo = input0Slot->GetTensorInfo();
+ auto numDimensions = inputTensorInfo.GetShape().GetNumDimensions();
+
+ // validate axis
+ int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
+ const int sNumDimensions = (static_cast<int>(numDimensions) + 1);
+ if (!(axis < sNumDimensions && axis >= -sNumDimensions))
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Axis index is not in range. Got %1% for Node %2% %3%")
+ % axis
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+
+ if (axis < 0)
+ {
+ axis = static_cast<int32_t>(numDimensions) + axis + 1;
+ }
+
+ StackDescriptor stackDescriptor;
+ stackDescriptor.m_Axis = static_cast<uint32_t>(axis);
+ stackDescriptor.m_NumInputs = static_cast<uint32_t>(numInputs);
+ stackDescriptor.m_InputShape = inputTensorInfo.GetShape();
+
+ const unsigned int supportedNumDims = 4;
+ for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
+ {
+ IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
+ TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
+
+ // Double check dimensions of the tensors
+ if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
+ {
+ throw armnn::ParseException(
+ boost::str(
+ boost::format(
+                    "Unsupported number of dimensions: %1% for input tensors of the "
+                    "Pack/Stack op. The number of dimensions must be less than %2% %3%"
+ % inputTensorInfo.GetNumDimensions()
+ % supportedNumDims
+ % CHECK_LOCATION().AsString()));
+ }
+ }
+
+ std::vector<unsigned int> outputDimensions;
+ for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); ++i)
+ {
+ outputDimensions.push_back(stackDescriptor.m_InputShape[i]);
+ }
+ outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
+
+ // add Stack Layer
+ IConnectableLayer* const layer = m_Network->AddStackLayer(stackDescriptor, nodeDef.name().c_str());
+
+ for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
+ {
+ IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
+ inputSlot.Connect(layer->GetInputSlot(viewIndex));
+ }
+
+ layer->GetOutputSlot(0).SetTensorInfo(
+ armnn::TensorInfo(static_cast<uint32_t>(outputDimensions.size()),
+ outputDimensions.data(),
+ inputTensorInfo.GetDataType()));
+
+ return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
+}
+
unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
const TensorInfo& inputTensorInfo,
const std::string& nodeName)
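
To summarise the shape handling in ParseStack above: a negative axis is normalised by adding rank+1, the axis must lie in [-(rank+1), rank], and the output shape is the common input shape with the number of inputs inserted at the axis position. A standalone sketch of that rule follows; the helper name is illustrative and not part of the patch.

// Illustrative helper mirroring the output-shape logic of TfParser::ParseStack:
// stacking N tensors of rank R along 'axis' yields a rank R+1 shape with N
// inserted at the axis position. Not part of the patch.
#include <cstdint>
#include <stdexcept>
#include <vector>

std::vector<unsigned int> StackOutputShape(const std::vector<unsigned int>& inputShape,
                                           int32_t axis,
                                           unsigned int numInputs)
{
    const int rankPlusOne = static_cast<int>(inputShape.size()) + 1;
    if (axis < -rankPlusOne || axis >= rankPlusOne)
    {
        throw std::out_of_range("Stack axis is out of range");
    }
    if (axis < 0)
    {
        axis += rankPlusOne; // e.g. axis -1 on a rank-3 input becomes 3
    }

    std::vector<unsigned int> outputShape(inputShape);
    outputShape.insert(outputShape.begin() + axis, numInputs);
    return outputShape;
}

// Example: two { 3, 2, 3 } inputs stacked on axis 3 (or -1) give { 3, 2, 3, 2 },
// which is the shape checked by the Stack3D unit tests added below.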
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index a7d02be33d..9277d44cb1 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -170,6 +170,7 @@ private:
ParsedTfOperationPtr ParseGreater(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParsePad(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
+ ParsedTfOperationPtr ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef& nodeDef, armnn::ActivationDescriptor& desc);
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd = false);
ParsedTfOperationPtr AddRealDivLayer(const tensorflow::NodeDef& nodeDef);
diff --git a/src/armnnTfParser/test/Stack.cpp b/src/armnnTfParser/test/Stack.cpp
new file mode 100644
index 0000000000..b28991713d
--- /dev/null
+++ b/src/armnnTfParser/test/Stack.cpp
@@ -0,0 +1,174 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+#include <PrototxtConversions.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct StackFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit StackFixture(const armnn::TensorShape& inputShape0,
+ const armnn::TensorShape& inputShape1,
+ int axis = 0)
+ {
+ m_Prototext = R"(
+ node {
+ name: "input0"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "input1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "output"
+ op: "Stack"
+ input: "input0"
+ input: "input1"
+ attr {
+ key: "axis"
+ value {
+ i: )";
+ m_Prototext += std::to_string(axis);
+ m_Prototext += R"(
+ }
+ }
+ })";
+
+ Setup({{"input0", inputShape0 },
+ {"input1", inputShape1 }}, {"output"});
+ }
+};
+
+struct Stack3DFixture : StackFixture
+{
+ Stack3DFixture() : StackFixture({ 3, 2, 3 }, { 3, 2, 3 }, 3 ) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(Stack3D, Stack3DFixture)
+{
+
+ RunTest<4>({ { "input0", { 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 13, 14, 15,
+ 16, 17, 18 } },
+ { "input1", { 19, 20, 21,
+ 22, 23, 24,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36 } } },
+ { { "output", { 1, 19,
+ 2, 20,
+ 3, 21,
+
+ 4, 22,
+ 5, 23,
+ 6, 24,
+
+ 7, 25,
+ 8, 26,
+ 9, 27,
+
+ 10, 28,
+ 11, 29,
+ 12, 30,
+
+ 13, 31,
+ 14, 32,
+ 15, 33,
+
+ 16, 34,
+ 17, 35,
+ 18, 36 } } });
+}
+
+struct Stack3DNegativeAxisFixture : StackFixture
+{
+ Stack3DNegativeAxisFixture() : StackFixture({ 3, 2, 3 }, { 3, 2, 3 }, -1 ) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(Stack3DNegativeAxis, Stack3DNegativeAxisFixture)
+{
+
+ RunTest<4>({ { "input0", { 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 13, 14, 15,
+ 16, 17, 18 } },
+ { "input1", { 19, 20, 21,
+ 22, 23, 24,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36 } } },
+ { { "output", { 1, 19,
+ 2, 20,
+ 3, 21,
+
+ 4, 22,
+ 5, 23,
+ 6, 24,
+
+ 7, 25,
+ 8, 26,
+ 9, 27,
+
+ 10, 28,
+ 11, 29,
+ 12, 30,
+
+ 13, 31,
+ 14, 32,
+ 15, 33,
+
+ 16, 34,
+ 17, 35,
+ 18, 36 } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()