author     Derek Lamberti <derek.lamberti@arm.com>   2019-12-10 22:00:43 +0000
committer  Derek Lamberti <derek.lamberti@arm.com>   2020-01-03 15:12:19 +0000
commit     baa177f0d465fe1d4f9e1979e1611ff6b1f128e0
tree       8eff0469fcff76cbad0d8e24b78859dec0e72822 /src/armnnTfParser/TfParser.cpp
parent     f143fba6e617267ed33acc8b3e1eb2130de0ffe0
IVGCVSW-4246 Clean build of parsers with -Wextra
Change-Id: Ib00f185b431ab74fd9425d8f478bd2ddb182f74b
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
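
The bulk of this change is one repeated idiom: each Parse* callback keeps its tensorflow::GraphDef& parameter (apparently to preserve a shared handler signature), and the unused parameter is passed to boost::ignore_unused so that -Wunused-parameter, enabled by -Wextra, stays quiet. A minimal standalone sketch of the idiom, using a hypothetical Parse function rather than armnn code:

    #include <boost/core/ignore_unused.hpp>

    // Built with -Wall -Wextra; without the ignore_unused call below,
    // GCC/Clang warn: unused parameter 'context' [-Wunused-parameter].
    int Parse(int nodeId, int context)
    {
        boost::ignore_unused(context); // compile-time no-op marking 'context' as deliberately unused
        return nodeId * 2;
    }

    int main()
    {
        return Parse(21, 0) == 42 ? 0 : 1;
    }

Since C++17 an unnamed parameter or the [[maybe_unused]] attribute achieves the same effect without Boost; the Boost helper is header-only and already a dependency here (see the <boost/core/ignore_unused.hpp> include in the diff below).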
Diffstat (limited to 'src/armnnTfParser/TfParser.cpp')
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp  31
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 8c68659b95..ca98f463b5 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -17,7 +17,7 @@
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
-#include "tensorflow/core/framework/graph.pb.h"
+#include <tensorflow/core/framework/graph.pb.h>
#include <boost/format.hpp>
#include <boost/core/ignore_unused.hpp>
@@ -727,6 +727,7 @@ IConnectableLayer* TfParser::CreateAdditionLayer(
ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
if (numberOfInputs < 2)
{
@@ -806,6 +807,7 @@ ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, con
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
// If one of the inputs is a MatMul and the other is a const, then we handle both nodes
@@ -835,6 +837,7 @@ ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, cons
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
return AddAdditionLayer(nodeDef, true);
}
@@ -865,6 +868,7 @@ private:
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
// Any requests for the output slots of this node should be forwarded to the node connected as input.
return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
@@ -1058,6 +1062,7 @@ struct InvokeParseFunction
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
BOOST_ASSERT(nodeDef.op() == "Const");
if (nodeDef.attr().count("value") == 0)
@@ -1194,6 +1199,7 @@ unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOper
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1335,6 +1341,7 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1530,6 +1537,7 @@ TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInf
ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1550,6 +1558,7 @@ ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDe
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -1698,6 +1707,7 @@ bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef
ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
if (inputs.size() != 2)
{
@@ -1835,6 +1845,7 @@ ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
@@ -1871,6 +1882,7 @@ ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
@@ -1884,6 +1896,7 @@ ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
@@ -1897,6 +1910,7 @@ ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
@@ -1908,6 +1922,7 @@ ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1999,6 +2014,7 @@ TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
// input consists of:
// input[0] the tensor which will be padded
// input[1] the tensor holding the padding values
@@ -2073,6 +2089,7 @@ ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
// In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
@@ -2158,6 +2175,7 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
// Note: the Shape layer is handled in a special way, because:
// 1. ARMNN doesn't support int32 tensors which it outputs.
// 2. ARMNN works with statically shaped tensors which are known at parse time.
@@ -2200,6 +2218,7 @@ ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
@@ -2238,6 +2257,7 @@ ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -2376,6 +2396,7 @@ TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo i
ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -2395,6 +2416,7 @@ ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
NormalizationDescriptor normalizationDescriptor;
@@ -2440,12 +2462,15 @@ public:
ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
+
// Defers the creation of the layer (see ParsedMatMulTfOperation).
return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
}
ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
+ boost::ignore_unused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -2484,7 +2509,7 @@ ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, con
std::inserter(positiveAxisSet, positiveAxisSet.begin()),
[rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
- CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
+ CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
{
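
The ParseMean hunk above shows the commit's other fix style: this call site drops the axisTensorInfo argument, presumably because the parameter was unused in CalculateReducedOutputTensoInfo itself (its definition lies outside this file's diff), so the parameter was removed rather than ignored. A sketch contrasting the two options, with hypothetical names (Shape, ReducedRankKeep, ReducedRankDrop):

    #include <boost/core/ignore_unused.hpp>

    struct Shape { unsigned rank; };

    // Option 1: keep the parameter (e.g. to preserve a shared callback
    // signature) and mark it as deliberately unused.
    unsigned ReducedRankKeep(const Shape& in, const Shape& axisInfo)
    {
        boost::ignore_unused(axisInfo);
        return in.rank - 1;
    }

    // Option 2: no caller needs the parameter, so remove it from the
    // signature and update every call site.
    unsigned ReducedRankDrop(const Shape& in)
    {
        return in.rank - 1;
    }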
@@ -2774,6 +2799,8 @@ ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
{
+ boost::ignore_unused(graphDef);
+
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();