From baa177f0d465fe1d4f9e1979e1611ff6b1f128e0 Mon Sep 17 00:00:00 2001
From: Derek Lamberti
Date: Tue, 10 Dec 2019 22:00:43 +0000
Subject: IVGCVSW-4246 Clean build of parsers with -Wextra

Change-Id: Ib00f185b431ab74fd9425d8f478bd2ddb182f74b
Signed-off-by: Derek Lamberti
---
 src/armnnTfLiteParser/TfLiteParser.cpp     |  1 +
 src/armnnTfLiteParser/test/Unsupported.cpp |  2 +-
 src/armnnTfParser/TfParser.cpp             | 31 ++++++++++++++++++++++++++++--
 src/armnnUtils/ParserHelper.cpp            |  5 +++--
 src/armnnUtils/ParserHelper.hpp            |  5 +++--
 src/armnnUtils/test/ParserHelperTest.cpp   | 28 +++++-----------------------
 src/backends/neon/NeonLayerSupport.cpp     |  2 +-
 7 files changed, 43 insertions(+), 31 deletions(-)

diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 6853512c8f..9c7dda8aec 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -385,6 +385,7 @@ CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                       armnn::TensorInfo& tensorInfo,
                       armnn::Optional<armnn::PermutationVector&> permutationVector)
 {
+    boost::ignore_unused(tensorPtr);
     BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
     BOOST_ASSERT_MSG(bufferPtr != nullptr,
         boost::str(
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index 25abde8142..9a9cdc5156 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -33,7 +33,7 @@ public:
     void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
 
-    void VisitOutputLayer(const IConnectableLayer*, LayerBindingId id, const char*) override {}
+    void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
 
     void VisitStandInLayer(const IConnectableLayer* layer,
                            const StandInDescriptor& descriptor,
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 8c68659b95..ca98f463b5 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -17,7 +17,7 @@
 #include 
 #include 
-#include "tensorflow/core/framework/graph.pb.h"
+#include <tensorflow/core/framework/graph.pb.h>
 #include 
 #include 
@@ -727,6 +727,7 @@ IConnectableLayer* TfParser::CreateAdditionLayer(
 ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
     if (numberOfInputs < 2)
     {
@@ -806,6 +807,7 @@ ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, con
 ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
@@ -835,6 +837,7 @@ ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, cons
 ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     return AddAdditionLayer(nodeDef, true);
 }
 
@@ -865,6 +868,7 @@ private:
 ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
     // Any requests for the output slots of this node should be forwarded to the node connected as input.
     return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
@@ -1058,6 +1062,7 @@ struct InvokeParseFunction
 ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     BOOST_ASSERT(nodeDef.op() == "Const");
 
     if (nodeDef.attr().count("value") == 0)
@@ -1194,6 +1199,7 @@ unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOpe
 ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1335,6 +1341,7 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1530,6 +1537,7 @@ TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInf
 ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1550,6 +1558,7 @@ ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDe
 ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
 
     if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -1698,6 +1707,7 @@ bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef
 ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     if (inputs.size() != 2)
     {
@@ -1835,6 +1845,7 @@ ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
 ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
@@ -1871,6 +1882,7 @@ ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1884,6 +1896,7 @@ ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1897,6 +1910,7 @@ ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
     IOutputSlot* input0Slot = inputLayers.first;
     IOutputSlot* input1Slot = inputLayers.second;
@@ -1908,6 +1922,7 @@ ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1999,6 +2014,7 @@ TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
 ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     // input consists of:
     // input[0] the tensor which will be padded
     // input[1] the tensor holding the padding values
@@ -2073,6 +2089,7 @@ ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
 
     // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
@@ -2158,6 +2175,7 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     // Note: the Shape layer is handled in a special way, because:
     //       1. ARMNN doesn't support int32 tensors which it outputs.
     //       2. ARMNN works with statically shaped tensors which are known at parse time.
@@ -2200,6 +2218,7 @@ ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
 
@@ -2238,6 +2257,7 @@ ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
     if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -2376,6 +2396,7 @@ TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo i
 ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -2395,6 +2416,7 @@ ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
 
     NormalizationDescriptor normalizationDescriptor;
@@ -2440,12 +2462,15 @@ public:
 ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
+
     // Defers the creation of the layer (see ParsedMatMulTfOperation).
     return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
 }
 
 ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
+    boost::ignore_unused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -2484,7 +2509,7 @@ ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, con
                    std::inserter(positiveAxisSet, positiveAxisSet.begin()),
                    [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
 
     if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
     {
@@ -2774,6 +2799,8 @@ ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
 ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef,
     PoolingAlgorithm pooltype)
 {
+    boost::ignore_unused(graphDef);
+
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
diff --git a/src/armnnUtils/ParserHelper.cpp b/src/armnnUtils/ParserHelper.cpp
index 990a9b2098..88e5756867 100644
--- a/src/armnnUtils/ParserHelper.cpp
+++ b/src/armnnUtils/ParserHelper.cpp
@@ -51,8 +51,9 @@ void ProcessConcatInputTensorInfo(armnn::TensorInfo& inputTensorInfo,
     }
 }
 
-void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo, const armnn::TensorInfo& axisTensorInfo,
-                                     const std::set<unsigned int>& axisSet, bool keepDims,
+void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo,
+                                     const std::set<unsigned int>& axisSet,
+                                     bool keepDims,
                                      armnn::TensorInfo& outputTensorInfo)
 {
     std::vector<unsigned int> outputShapeVector;
diff --git a/src/armnnUtils/ParserHelper.hpp b/src/armnnUtils/ParserHelper.hpp
index bcc1e5b2cc..d85ce2642f 100644
--- a/src/armnnUtils/ParserHelper.hpp
+++ b/src/armnnUtils/ParserHelper.hpp
@@ -17,8 +17,9 @@ void ProcessConcatInputTensorInfo(armnn::TensorInfo& inputTensorInfo,
                                   unsigned int& mergeDimOrigin);
 
 /// Creates a tensor info after reducing the dimensions mentioned in axisData.
-void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo, const armnn::TensorInfo& axisTensorInfo,
-                                     const std::set<unsigned int>& axisSet, bool keepDims,
+void CalculateReducedOutputTensoInfo(const armnn::TensorInfo& inputTensorInfo,
+                                     const std::set<unsigned int>& axisSet,
+                                     bool keepDims,
                                      armnn::TensorInfo& outputTensorInfo);
 
 } // namespace armnnUtils
diff --git a/src/armnnUtils/test/ParserHelperTest.cpp b/src/armnnUtils/test/ParserHelperTest.cpp
index 122ad7649e..636e2bd3f9 100644
--- a/src/armnnUtils/test/ParserHelperTest.cpp
+++ b/src/armnnUtils/test/ParserHelperTest.cpp
@@ -22,54 +22,39 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
     TensorInfo inputTensorInfo(3, &inputShape[0], DataType::Float32);
 
     // Reducing all dimensions results in one single output value (one dimension)
-    unsigned int axisShape1[] = { 3 };
     std::set<unsigned int> axisData1 = { 0, 1, 2 };
-    TensorInfo axisTensorInfo1(1, &axisShape1[0], DataType::Signed32);
-
     TensorInfo outputTensorInfo1;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo1, axisData1,
-                                    keepDims, outputTensorInfo1);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData1, keepDims, outputTensorInfo1);
 
     BOOST_ASSERT(outputTensorInfo1.GetNumDimensions() == 1);
     BOOST_ASSERT(outputTensorInfo1.GetShape()[0] == 1);
 
     // Reducing dimension 0 results in a 3x4 size tensor (one dimension)
-    unsigned int axisShape2[] = { 1 };
     std::set<unsigned int> axisData2 = { 0 };
-    TensorInfo axisTensorInfo2(1, &axisShape2[0], DataType::Signed32);
-
     TensorInfo outputTensorInfo2;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo2, axisData2,
-                                    keepDims, outputTensorInfo2);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData2, keepDims, outputTensorInfo2);
 
     BOOST_ASSERT(outputTensorInfo2.GetNumDimensions() == 1);
     BOOST_ASSERT(outputTensorInfo2.GetShape()[0] == 12);
 
     // Reducing dimensions 0,1 results in a 4 size tensor (one dimension)
-    unsigned int axisShape3[] = { 2 };
     std::set<unsigned int> axisData3 = { 0, 1 };
-    TensorInfo axisTensorInfo3(1, &axisShape3[0], DataType::Signed32);
-
     TensorInfo outputTensorInfo3;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo3, axisData3,
-                                    keepDims, outputTensorInfo3);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData3, keepDims, outputTensorInfo3);
 
     BOOST_ASSERT(outputTensorInfo3.GetNumDimensions() == 1);
     BOOST_ASSERT(outputTensorInfo3.GetShape()[0] == 4);
 
     // Reducing dimension 0 results in a { 1, 3, 4 } dimension tensor
     keepDims = true;
-    unsigned int axisShape4[] = { 1 };
     std::set<unsigned int> axisData4 = { 0 };
-    TensorInfo axisTensorInfo4(1, &axisShape4[0], DataType::Signed32);
 
     TensorInfo outputTensorInfo4;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo4, axisData4,
-                                    keepDims, outputTensorInfo4);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData4, keepDims, outputTensorInfo4);
 
     BOOST_ASSERT(outputTensorInfo4.GetNumDimensions() == 3);
     BOOST_ASSERT(outputTensorInfo4.GetShape()[0] == 1);
@@ -78,14 +63,11 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
 
     // Reducing dimension 1, 2 results in a { 2, 1, 1 } dimension tensor
     keepDims = true;
-    unsigned int axisShape5[] = { 2 };
     std::set<unsigned int> axisData5 = { 1, 2 };
-    TensorInfo axisTensorInfo5(1, &axisShape5[0], DataType::Signed32);
 
     TensorInfo outputTensorInfo5;
 
-    CalculateReducedOutputTensoInfo(inputTensorInfo, axisTensorInfo5, axisData5,
-                                    keepDims, outputTensorInfo5);
+    CalculateReducedOutputTensoInfo(inputTensorInfo, axisData5, keepDims, outputTensorInfo5);
 
     BOOST_ASSERT(outputTensorInfo5.GetNumDimensions() == 3);
     BOOST_ASSERT(outputTensorInfo5.GetShape()[0] == 2);
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index c79aa78d1b..a73837b884 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -69,7 +69,7 @@ namespace
 template< typename ... Args>
 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
 {
-    boost::ignore_unused((args)...);
+    boost::ignore_unused(reasonIfUnsupported, (args)...);
 #if defined(ARMCOMPUTENEON_ENABLED)
     return true;
 #else
--
cgit v1.2.1
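
Editor's note (not part of the patch above): every hunk applies the same fix — a parameter that must stay in the signature but is never read (for example graphDef, or an argument only used in some build configurations) is passed to boost::ignore_unused, which compiles to nothing and silences -Wextra's unused-parameter warning without changing behaviour. The following is a minimal, self-contained sketch of that pattern; the function and argument names are hypothetical and not taken from Arm NN.

    #include <boost/core/ignore_unused.hpp>
    #include <iostream>
    #include <string>

    // Hypothetical parser hook: the graph argument is kept so that all Parse*
    // functions share one signature, even though this one only needs the node.
    void ParseExampleNode(const std::string& nodeName, const std::string& graphName)
    {
        boost::ignore_unused(graphName); // no-op; suppresses -Wunused-parameter under -Wextra
        std::cout << "parsing node " << nodeName << "\n";
    }

    int main()
    {
        ParseExampleNode("conv1", "example-graph");
        return 0;
    }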