author    Sadik Armagan <sadik.armagan@arm.com>      2018-12-04 10:02:08 +0000
committer Les Bell <les.bell@arm.com>                2018-12-04 11:56:36 +0000
commit    975c09aab8e628b8052226d7a2e2ed2b76aa6702 (patch)
tree      7a4ed69df5de6b0b97125f03aba24a41720be27b /src/armnnTfParser
parent    a9a1cf117072b8aa81e27bab4d1d3e356d8ea51d (diff)
download  armnn-975c09aab8e628b8052226d7a2e2ed2b76aa6702.tar.gz
IVGCVSW-2256 Add parser function in TensorFlow Parser (Maximum)
* Extended Maximum operator support in TF Parser
* Added extra unit tests for Maximum operator in TF Parser

Change-Id: I68edb43b2a3105507f2f5f028ff0e35206965dca
Diffstat (limited to 'src/armnnTfParser')
-rw-r--r--  src/armnnTfParser/TfParser.cpp                    83
-rw-r--r--  src/armnnTfParser/TfParser.hpp                     1
-rw-r--r--  src/armnnTfParser/test/Maximum.cpp               144
-rw-r--r--  src/armnnTfParser/test/MaximumForLeakyRelu.cpp     1
4 files changed, 209 insertions, 20 deletions
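For context, the new AddMaximumLayer in the diff below derives its output shape by taking the element-wise maximum of the two (rank-aligned) input shapes, which matches the broadcast cases exercised in test/Maximum.cpp. A minimal standalone sketch of that rule, using std::vector in place of armnn::TensorShape (an illustrative simplification, not parser code), is:

// Standalone sketch of the per-dimension output shape rule applied by
// AddMaximumLayer once both inputs have been reshaped to the same rank.
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>

std::vector<unsigned int> BroadcastMaxShape(const std::vector<unsigned int>& shape0,
                                            const std::vector<unsigned int>& shape1)
{
    // Both shapes are assumed rank-aligned already; in the parser, a
    // broadcast reshape is inserted on the lower-rank input before this point.
    assert(shape0.size() == shape1.size());
    std::vector<unsigned int> outputShape(shape0.size());
    for (size_t i = 0; i < shape0.size(); ++i)
    {
        // Each output dimension is the larger of the two input dimensions,
        // e.g. {1, 1, 2, 1} vs {1, 2, 1, 3} -> {1, 2, 2, 3}.
        outputShape[i] = std::max(shape0[i], shape1[i]);
    }
    return outputShape;
}

int main()
{
    const auto out = BroadcastMaxShape({1, 1, 2, 1}, {1, 2, 1, 3});
    for (auto d : out) { std::cout << d << ' '; }   // prints: 1 2 2 3
    std::cout << '\n';
    return 0;
}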
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 52ba92cad5..abf4d3f5c9 100644
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -1375,27 +1375,34 @@ bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef
return false;
}
-// For max nodes, we only support those as part of a leaky relu, i.e.,
-// as part for a max(mul(a, x), x) expression. We thus need to
-// identify one input as a multiplication with a scalar constant,
-// extract the constant and the two inputs, verify that the two other
-// inputs are the same node, and then create a leaky relu node.
-
ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
+ if (inputs.size() != 2)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Maximum expects two inputs!. Got %1% for Node %2% %3%")
+ % inputs.size()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+
auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
IOutputSlot* outputOfLeakyRelu = nullptr;
ActivationDescriptor desc;
- // There are four possible scenarios we need to support (respectively below):
- // 1, max(mul(a, x), x)
- // 2, max(mul(x, a), x)
- // 3, max(x, mul(a, x))
- // 4, max(x, mul(x, a))
+ // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
+ // i.e. one of the four possible scenarios:
+ // 1, max(mul(a, x), x)
+ // 2, max(mul(x, a), x)
+ // 3, max(x, mul(a, x))
+ // 4, max(x, mul(x, a))
+ // These are handled by an activation layer.
if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
@@ -1411,14 +1418,9 @@ ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
}
else
{
- throw ParseException(
- boost::str(
- boost::format(
- "ArmNN currenly offers limited support for Maximum node when it can be fused to "
- "form a LeakyRelu activation as leakyrelu=max(mul(alpha, X), X). "
- "Node: %1% %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ // Anything else is just a maximum layer.
+
+ return AddMaximumLayer(nodeDef);
}
}
@@ -2183,6 +2185,49 @@ ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDe
return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
+ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
+{
+ std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
+
+ IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
+ IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
+
+ auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
+ auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
+
+ if (input0NumDims < input1NumDims)
+ {
+ const bool isNHWC = true;
+ input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
+ }
+ if (input1NumDims < input0NumDims)
+ {
+ const bool isNHWC = true;
+ input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
+ }
+
+ IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
+
+ input0Slot->Connect(layer->GetInputSlot(0));
+ input1Slot->Connect(layer->GetInputSlot(1));
+
+ TensorInfo outputInfo = input0Slot->GetTensorInfo();
+ std::vector<unsigned int> outputShape;
+
+ const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
+ const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
+
+ for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
+ {
+ outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
+ }
+
+ outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
+ layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
+}
+
IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
{
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index 78cbe1d605..f57ea0518e 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -155,6 +155,7 @@ private:
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef& nodeDef, armnn::ActivationDescriptor& desc);
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd = false);
ParsedTfOperationPtr AddRealDivLayer(const tensorflow::NodeDef& nodeDef);
+ ParsedTfOperationPtr AddMaximumLayer(const tensorflow::NodeDef& nodeDef);
private:
armnn::IConnectableLayer* AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef);
diff --git a/src/armnnTfParser/test/Maximum.cpp b/src/armnnTfParser/test/Maximum.cpp
new file mode 100644
index 0000000000..8b87b76296
--- /dev/null
+++ b/src/armnnTfParser/test/Maximum.cpp
@@ -0,0 +1,144 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct MaximumFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MaximumFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1)
+ {
+ m_Prototext = R"(
+node {
+ name: "input0"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "input1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "output"
+ op: "Maximum"
+ input: "input0"
+ input: "input1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+
+ Setup({ { "input0", inputShape0 },
+ { "input1", inputShape1 } },
+ { "output" });
+ }
+};
+
+struct MaximumFixture4D4D : public MaximumFixture
+{
+ MaximumFixture4D4D() : MaximumFixture({ 1, 2, 2, 3 }, { 1, 2, 2, 3 }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMaximum4D4D, MaximumFixture4D4D)
+{
+ RunTest<4>({ { "input0", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } },
+ { "input1", { 5.0f, 1.0f, 3.0f,
+ 4.0f, 5.5f, 1.0f,
+ 2.0f, 17.0f, 18.0f,
+ 19.0f, 1.0f, 3.0f } } },
+ { { "output", { 5.0f, 1.0f, 3.0f,
+ 4.0f, 5.5f, 5.0f,
+ 6.0f, 17.0f, 18.0f,
+ 19.0f, 10.0f, 11.0f } } });
+}
+
+struct MaximumBroadcastFixture4D4D : public MaximumFixture
+{
+ MaximumBroadcastFixture4D4D() : MaximumFixture({ 1, 1, 2, 1 }, { 1, 2, 1, 3 }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast4D4D, MaximumBroadcastFixture4D4D)
+{
+ RunTest<4>({ { "input0", { 2.0f, 4.0f } },
+ { "input1", { 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f } } },
+ { { "output", { 2.0f, 2.0f, 3.0f,
+ 4.0f, 4.0f, 4.0f,
+ 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f } } });
+}
+
+struct MaximumBroadcastFixture4D1D : public MaximumFixture
+{
+ MaximumBroadcastFixture4D1D() : MaximumFixture({ 1, 2, 2, 3 }, { 1 }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast4D1D, MaximumBroadcastFixture4D1D)
+{
+ RunTest<4>({ { "input0", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } },
+ { "input1", { 5.0f } } },
+ { { "output", { 5.0f, 5.0f, 5.0f,
+ 5.0f, 5.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } } });
+}
+
+struct MaximumBroadcastFixture1D4D : public MaximumFixture
+{
+ MaximumBroadcastFixture1D4D() : MaximumFixture({ 1 }, { 1, 2, 2, 3 }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast1D4D, MaximumBroadcastFixture1D4D)
+{
+ RunTest<4>({ { "input0", { 3.0f } },
+ { "input1", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } } },
+ { { "output", { 3.0f, 3.0f, 3.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/MaximumForLeakyRelu.cpp b/src/armnnTfParser/test/MaximumForLeakyRelu.cpp
index a52fe75000..05c5003399 100644
--- a/src/armnnTfParser/test/MaximumForLeakyRelu.cpp
+++ b/src/armnnTfParser/test/MaximumForLeakyRelu.cpp
@@ -36,7 +36,6 @@ struct UnsupportedMaximumFixture
name: "Maximum"
op: "Maximum"
input: "graphInput"
- input: "graphInput"
attr {
key: "dtype"
value {