From 6f9f99024df71b6b7f7115b58d85eb100c66f3c5 Mon Sep 17 00:00:00 2001 From: Finn Williams Date: Fri, 13 Nov 2020 13:23:15 +0000 Subject: IVGCVSW-5508 Activate compiler warnings in ArmNN TfLite Delegate Signed-off-by: Finn Williams Change-Id: I1a8e2aa618ff693c61010e6150f3ca41b8ab1201 --- delegate/CMakeLists.txt | 41 ++++++++++++++++--------- delegate/src/Activation.hpp | 9 ++++++ delegate/src/ArgMinMax.hpp | 8 +++++ delegate/src/BatchSpace.hpp | 12 ++++++++ delegate/src/Comparison.hpp | 2 ++ delegate/src/Control.hpp | 8 +++++ delegate/src/DelegateUtils.hpp | 50 ++++++++++++++++++------------- delegate/src/Fill.hpp | 8 +++++ delegate/src/FullyConnected.hpp | 1 + delegate/src/Gather.hpp | 8 +++++ delegate/src/Lstm.hpp | 8 +++++ delegate/src/Normalization.hpp | 8 +++++ delegate/src/Pad.hpp | 8 +++++ delegate/src/Quantization.hpp | 3 ++ delegate/src/Redefine.hpp | 20 +++++++++++++ delegate/src/Resize.hpp | 1 + delegate/src/Round.hpp | 8 +++++ delegate/src/Slice.hpp | 8 +++++ delegate/src/Softmax.hpp | 8 +++++ delegate/src/SpaceDepth.hpp | 14 +++++++++ delegate/src/Transpose.hpp | 5 +++- delegate/src/armnn_delegate.cpp | 18 ++++++----- delegate/src/test/Pooling2dTestHelper.hpp | 2 +- 23 files changed, 213 insertions(+), 45 deletions(-) diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt index de6566ac33..d4de4c23fe 100644 --- a/delegate/CMakeLists.txt +++ b/delegate/CMakeLists.txt @@ -6,7 +6,7 @@ cmake_minimum_required (VERSION 3.8.0) project(armnnDelegate) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/") @@ -72,9 +72,18 @@ find_package(TfLite REQUIRED MODULE) target_link_libraries(armnnDelegate ${TfLite_LIB}) -target_include_directories(armnnDelegate - PRIVATE - ${TfLite_INCLUDE_DIR}) +# Various tflite header files are not warning clean +# We can't change compilation flags on header files directly, so we need to add them to an interface library first +add_library(tflite_headers INTERFACE) +target_include_directories(tflite_headers INTERFACE $ + $) + +target_compile_options(tflite_headers INTERFACE $<$:-Wno-conversion + -Wno-sign-conversion + -Wno-unused-parameter + -Wno-unused-function>) + +target_link_libraries(armnnDelegate tflite_headers) ## Add Flatbuffers dependency find_package(Flatbuffers REQUIRED MODULE) @@ -82,9 +91,14 @@ find_package(Flatbuffers REQUIRED MODULE) target_link_libraries(armnnDelegate ${Flatbuffers_LIB}) -target_include_directories(armnnDelegate - PRIVATE - ${Flatbuffers_INCLUDE_DIR}) +# include/flatbuffers/flatbuffers.h is not warning clean +# We can't change compilation flags on header files directly, so we need to add them to an interface library first +add_library(flatbuffer_headers INTERFACE) +target_include_directories(flatbuffer_headers INTERFACE $ + $) +target_compile_options(flatbuffer_headers INTERFACE $<$:-Wno-sign-conversion>) + +target_link_libraries(armnnDelegate flatbuffer_headers) option(BUILD_UNIT_TESTS "Build unit tests" ON) if(BUILD_UNIT_TESTS) @@ -121,20 +135,19 @@ if(BUILD_UNIT_TESTS) target_link_libraries(DelegateUnitTests armnnDelegate) target_link_libraries(DelegateUnitTests Armnn::armnnUtils) - target_include_directories(DelegateUnitTests - PRIVATE - ${TfLite_INCLUDE_DIR}) +target_link_libraries(DelegateUnitTests tflite_headers) + +target_link_libraries(DelegateUnitTests flatbuffer_headers) - 
target_include_directories(DelegateUnitTests - PRIVATE - ${Flatbuffers_INCLUDE_DIR}) endif() #################################################### ## Export targets set(armnn_delegate_export_targets) list(APPEND armnn_delegate_export_targets - armnnDelegate) + armnnDelegate + tflite_headers + flatbuffer_headers) install( TARGETS ${armnn_delegate_export_targets} diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp index bd80849295..1ffa7d7f8c 100644 --- a/delegate/src/Activation.hpp +++ b/delegate/src/Activation.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,13 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData, int nodeIndex, int32_t comparisonOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + comparisonOperatorCode); + + return kTfLiteError; } diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp index 4d454e10bb..367ef2ed14 100644 --- a/delegate/src/ArgMinMax.hpp +++ b/delegate/src/ArgMinMax.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData, int nodeIndex, int32_t argMinMaxOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + argMinMaxOperatorCode); + return kTfLiteError; } diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp index 3479ddf30b..5a8a5dcd5b 100644 --- a/delegate/src/BatchSpace.hpp +++ b/delegate/src/BatchSpace.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,11 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); return kTfLiteError; } @@ -28,6 +35,11 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); return kTfLiteError; } diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp index f787a22090..2e6a7db4b6 100644 --- a/delegate/src/Comparison.hpp +++ b/delegate/src/Comparison.hpp @@ -6,6 +6,7 @@ #pragma once #include "DelegateUtils.hpp" +#include #include #include @@ -32,6 +33,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData, tfLiteContext, "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ", tfLiteComparisonOperatorCode, nodeIndex); + return kTfLiteError; } diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp index 53e5f1b3ef..437b2246d5 100644 --- a/delegate/src/Control.hpp +++ b/delegate/src/Control.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitControlOperator(DelegateData& delegateData, int nodeIndex, int32_t controlOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + controlOperatorCode); + return kTfLiteError; } diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp index dcad38503a..e9f579b699 100644 --- a/delegate/src/DelegateUtils.hpp +++ b/delegate/src/DelegateUtils.hpp @@ -70,7 +70,7 @@ TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext, int nodeIndex) { auto numInputs = tfLiteNode->inputs->size; - if (numInputs != expectedSize) + if (static_cast(numInputs) != expectedSize) { TF_LITE_MAYBE_KERNEL_LOG( 
tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d", @@ -86,7 +86,7 @@ TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext, int nodeIndex) { auto numOutputs = tfLiteNode->outputs->size; - if (numOutputs != expectedSize) + if (static_cast(numOutputs) != expectedSize) { TF_LITE_MAYBE_KERNEL_LOG( tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d", @@ -137,7 +137,7 @@ TfLiteStatus Connect(armnn::IConnectableLayer* layer, TfLiteNode* tfLiteNode, armnnDelegate::DelegateData& data) { - ARMNN_ASSERT(tfLiteNode->outputs->size == layer->GetNumOutputSlots()); + ARMNN_ASSERT(static_cast(tfLiteNode->outputs->size) == layer->GetNumOutputSlots()); // Connect the input slots for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex) @@ -152,7 +152,7 @@ TfLiteStatus Connect(armnn::IConnectableLayer* layer, for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex) { armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex); - data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot; + data.m_OutputSlotForNode[static_cast(tfLiteNode->outputs->data[outputIndex])] = &outputSlot; } return kTfLiteOk; @@ -175,8 +175,8 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0, } unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1); - unsigned int dimDifference = - std::abs(armnn::numeric_cast(inputDimensions0) - armnn::numeric_cast(inputDimensions1)); + unsigned int dimDifference = static_cast(std::abs(armnn::numeric_cast(inputDimensions0) - + armnn::numeric_cast(inputDimensions1))); bool input0IsSmaller = inputDimensions0 < inputDimensions1; const armnn::TensorInfo& smallInfo = input0IsSmaller ? 
inputInfo0 : inputInfo1; @@ -217,22 +217,27 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0, if (input0IsSmaller) { - delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(reshapeLayer->GetInputSlot(0)); + delegateData.m_OutputSlotForNode[static_cast(tfLiteNode->inputs->data[0])] + ->Connect(reshapeLayer->GetInputSlot(0)); reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0)); - delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(startLayer->GetInputSlot(1)); + delegateData.m_OutputSlotForNode[static_cast(tfLiteNode->inputs->data[1])] + ->Connect(startLayer->GetInputSlot(1)); } else { - delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(reshapeLayer->GetInputSlot(0)); + delegateData.m_OutputSlotForNode[static_cast(tfLiteNode->inputs->data[1])] + ->Connect(reshapeLayer->GetInputSlot(0)); reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1)); - delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(startLayer->GetInputSlot(0)); + delegateData.m_OutputSlotForNode[static_cast(tfLiteNode->inputs->data[0])] + ->Connect(startLayer->GetInputSlot(0)); } // Prepare output slots for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex) { armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex); - delegateData.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot; + delegateData.m_OutputSlotForNode + [static_cast(tfLiteNode->outputs->data[outputIndex])] = &outputSlot; } return reshapeLayer; @@ -246,8 +251,7 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext, armnnDelegate::DelegateData& data) { - armnn::IOutputSlot& outputSlot = prevLayer->GetOutputSlot(outputSlotIndex); - const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo(); + const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo(); armnn::ActivationDescriptor activationDesc; @@ -314,9 +318,11 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext, // Connect and prepare output slots for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex) { - data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]]->Connect(activationLayer->GetInputSlot(0)); + data.m_OutputSlotForNode[static_cast( + tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0)); armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex); - data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot; + data.m_OutputSlotForNode[static_cast( + tfLiteNode->outputs->data[outputIndex])] = &outputSlot; } return kTfLiteOk; } @@ -347,7 +353,7 @@ armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor) case kTfLiteInt32: return armnn::DataType::Signed32; default: - throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " + tfLiteTensor.type); + throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]); } } @@ -364,17 +370,19 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, } else { - std::vector tensorDims(tensorDimensionSize); + std::vector tensorDims(static_cast(tensorDimensionSize)); bool dimensionsSpecificity[5] = { true, true, true, true, true }; - for (unsigned int i = 0; i < tensorDimensionSize; ++i) { + for (unsigned int i = 0; i < static_cast(tensorDimensionSize); ++i) { auto dim = 
tfLiteTensor.dims->data[i]; if (dim == 0) { dimensionsSpecificity[i] = false; } - tensorDims[i] = dim; + tensorDims[i] = static_cast(dim); } - armnn::TensorShape tensorShape(tensorDimensionSize, tensorDims.data(), dimensionsSpecificity); + armnn::TensorShape tensorShape(static_cast(tensorDimensionSize), + tensorDims.data(), + dimensionsSpecificity); ret = armnn::TensorInfo(tensorShape, type); } @@ -387,7 +395,7 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, if (affineQuantization->scale->size > 1) { std::vector quantizationScales; - for (unsigned int i = 1; i < affineQuantization->scale->size; ++i) + for (unsigned int i = 1; i < static_cast(affineQuantization->scale->size); ++i) { quantizationScales.push_back(affineQuantization->scale->data[i]); } diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp index 6e63d6d256..99c3c625c2 100644 --- a/delegate/src/Fill.hpp +++ b/delegate/src/Fill.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp index 48bf06f94a..b79f6a2bb2 100644 --- a/delegate/src/FullyConnected.hpp +++ b/delegate/src/FullyConnected.hpp @@ -6,6 +6,7 @@ #pragma once #include "DelegateUtils.hpp" +#include #include #include diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp index a004f2cf12..98d8dc9656 100644 --- a/delegate/src/Gather.hpp +++ b/delegate/src/Gather.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData, int nodeIndex, int32_t gatherOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + gatherOperatorCode); + return kTfLiteError; } diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp index f151c43573..b81b2565b1 100644 --- a/delegate/src/Lstm.hpp +++ b/delegate/src/Lstm.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp index 7a73e6154a..4c18b364cc 100644 --- a/delegate/src/Normalization.hpp +++ b/delegate/src/Normalization.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitNormalizationOperator(DelegateData& delegateData, int nodeIndex, int32_t normalizationOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + normalizationOperatorCode); + return kTfLiteError; } diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp index cb7652fb7c..2134232b61 100644 --- a/delegate/src/Pad.hpp +++ b/delegate/src/Pad.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData, int nodeIndex, int32_t padOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + padOperatorCode); + return kTfLiteError; } diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp index 
4adbd11616..565f1e37c0 100644 --- a/delegate/src/Quantization.hpp +++ b/delegate/src/Quantization.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -40,6 +42,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData, tfLiteContext, "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ", tfLiteDequantizeOperatorCode, nodeIndex); + return kTfLiteError; } diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp index 89a24b0aa0..755bb97494 100644 --- a/delegate/src/Redefine.hpp +++ b/delegate/src/Redefine.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } @@ -28,6 +36,12 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } @@ -37,6 +51,12 @@ TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp index f91cdb04a0..626112aa37 100644 --- a/delegate/src/Resize.hpp +++ b/delegate/src/Resize.hpp @@ -6,6 +6,7 @@ #pragma once #include "DelegateUtils.hpp" +#include #include diff --git a/delegate/src/Round.hpp b/delegate/src/Round.hpp index e3e2b4ca3a..3335d0b337 100644 --- a/delegate/src/Round.hpp +++ b/delegate/src/Round.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitFloorOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp index 433a95e7bf..0311abf41c 100644 --- a/delegate/src/Slice.hpp +++ b/delegate/src/Slice.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, int nodeIndex, int32_t sliceOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + sliceOperatorCode); + return kTfLiteError; } diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp index b22fa9bf67..ddadbc73c8 100644 --- a/delegate/src/Softmax.hpp +++ b/delegate/src/Softmax.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData, int nodeIndex, int32_t softmaxOperatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + softmaxOperatorCode); + return kTfLiteError; } diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp index 4f69a73281..603e0f2fff 100644 --- a/delegate/src/SpaceDepth.hpp +++ b/delegate/src/SpaceDepth.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -19,6 +21,12 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } @@ -28,6 
+36,12 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { + armnn::IgnoreUnused(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + operatorCode); + return kTfLiteError; } diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp index c44c0d2773..37005e0b4d 100644 --- a/delegate/src/Transpose.hpp +++ b/delegate/src/Transpose.hpp @@ -5,6 +5,8 @@ #pragma once +#include + #include #include #include @@ -31,6 +33,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData, "TfLiteArmnnDelegate: Dynamic input tensors are not supported in " "operator #%d node #%d: ", tfliteTransposeOperatorCode, nodeIndex); + return kTfLiteError; } @@ -66,7 +69,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData, armnn::TransposeDescriptor descriptor(armnn::PermutationVector( reinterpret_cast (permTensorDataPtr), - (armnn::PermutationVector::SizeType)(numEl))); + static_cast(numEl))); bool isSupported = false; diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp index 3380c84d0b..d2b1796708 100644 --- a/delegate/src/armnn_delegate.cpp +++ b/delegate/src/armnn_delegate.cpp @@ -67,6 +67,7 @@ TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDeleg static const TfLiteRegistration kArmnnSubgraphRegistration = { // ArmnnSubgraph Init .init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* { + armnn::IgnoreUnused(length); const TfLiteDelegateParams* parameters = reinterpret_cast(buffer); return static_cast(ArmnnSubgraph::Create( @@ -74,6 +75,7 @@ TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDeleg }, // ArmnnSubgraph Free .free = [](TfLiteContext* tfLiteContext, void* buffer) -> void { + armnn::IgnoreUnused(tfLiteContext); if (buffer != nullptr) { delete static_cast(buffer); @@ -208,7 +210,7 @@ TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData, const TfLiteIntArray* inputs, std::vector& inputBindings) { - const size_t numInputs = inputs->size; + const size_t numInputs = static_cast(inputs->size); for (unsigned int i = 0; i < numInputs; ++i) { const int32_t tensorId = inputs->data[i]; @@ -227,7 +229,7 @@ TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData, outputSlot.SetTensorInfo(tensorInfo); // Store for creating connections - delegateData.m_OutputSlotForNode[tensorId] = &outputSlot; + delegateData.m_OutputSlotForNode[static_cast(tensorId)] = &outputSlot; inputBindings.push_back(std::make_pair(bindingId, tensorInfo)); } @@ -240,7 +242,7 @@ TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData, const TfLiteIntArray* outputs, std::vector& outputBindings) { - const size_t numOutputs = outputs->size; + const size_t numOutputs = static_cast(outputs->size); for (unsigned int i = 0; i < numOutputs; ++i) { const int32_t tensorId = outputs->data[i]; @@ -250,8 +252,8 @@ TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData, armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId); auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor); - ARMNN_ASSERT(delegateData.m_OutputSlotForNode[tensorId] != nullptr); - delegateData.m_OutputSlotForNode[tensorId]->Connect(layer->GetInputSlot(0)); + ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast(tensorId)] != nullptr); + delegateData.m_OutputSlotForNode[static_cast(tensorId)]->Connect(layer->GetInputSlot(0)); outputBindings.push_back(std::make_pair(bindingId, tensorInfo)); } @@ 
-272,7 +274,6 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext, DelegateData delegateData(delegate->m_Options.GetBackends()); // Build ArmNN Network - using NetworkOptions = std::vector; armnn::NetworkOptions networkOptions = {}; armnn::NetworkId networkId; delegateData.m_Network = armnn::INetwork::Create(networkOptions); @@ -300,12 +301,12 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext, if (tfLiteContext->GetNodeAndRegistration( tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk) { - throw armnn::Exception("TfLiteArmnnDelegate: Unable to get node registration: " + nodeIndex); + throw armnn::Exception(&"TfLiteArmnnDelegate: Unable to get node registration: " [ nodeIndex]); } if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk) { - throw armnn::Exception("TfLiteArmnnDelegate: Unable to parse node: " + nodeIndex); + throw armnn::Exception(&"TfLiteArmnnDelegate: Unable to parse node: " [ nodeIndex]); } } @@ -359,6 +360,7 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext, TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext) { + armnn::IgnoreUnused(tfLiteContext); return kTfLiteOk; } diff --git a/delegate/src/test/Pooling2dTestHelper.hpp b/delegate/src/test/Pooling2dTestHelper.hpp index a344650814..f107d9473f 100644 --- a/delegate/src/test/Pooling2dTestHelper.hpp +++ b/delegate/src/test/Pooling2dTestHelper.hpp @@ -191,7 +191,7 @@ void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode, auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor(armnnDelegateOutputId); auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId); - for (size_t i = 0; i < tfLiteDelegateOutputTensor->dims->size; i++) + for (size_t i = 0; i < static_cast(tfLiteDelegateOutputTensor->dims->size); i++) { CHECK(outputShape[i] == armnnDelegateOutputTensor->dims->data[i]); CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]); -- cgit v1.2.1
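
Note on the CMakeLists.txt hunks above: the in-diff comments describe the central build trick of this change. The TfLite and flatbuffers headers are not warning-clean, compile flags cannot be attached to header files directly, and the delegate now builds with -Wall -Wextra -Werror, so the include paths are wrapped in INTERFACE libraries (tflite_headers, flatbuffer_headers) that carry the relaxing -Wno-* options. The following is a minimal stand-alone sketch of that pattern, not the patch itself; third_party_headers, THIRD_PARTY_INCLUDE_DIR and consumer_target are placeholder names, and the real patch additionally wraps its paths and options in generator expressions.

    # Sketch of the interface-library pattern used for tflite_headers / flatbuffer_headers.
    # third_party_headers, THIRD_PARTY_INCLUDE_DIR and consumer_target are illustrative names.
    add_library(third_party_headers INTERFACE)

    # Consumers inherit the include path from the interface target.
    target_include_directories(third_party_headers INTERFACE ${THIRD_PARTY_INCLUDE_DIR})

    # Relax only the warnings these headers trip over. The options propagate to every
    # translation unit of the consuming target and appear after its own flags, so the
    # later -Wno-* wins over the -Werror'd warnings the headers would otherwise trigger.
    target_compile_options(third_party_headers INTERFACE
        -Wno-conversion
        -Wno-sign-conversion
        -Wno-unused-parameter)

    # Linking pulls in both the include path and the warning options.
    target_link_libraries(consumer_target PRIVATE third_party_headers)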
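
Note on the per-operator hunks (Activation.hpp, ArgMinMax.hpp, BatchSpace.hpp, and the other Visit*Operator files): they all repeat one pattern. The visitor functions are still unimplemented stubs that return kTfLiteError, and with -Wextra -Werror their unused parameters would now fail the build, so each file includes the armnn/utility/IgnoreUnused.hpp header and routes the parameters through armnn::IgnoreUnused. The sketch below shows the idea in isolation; the IgnoreUnused template here is a local stand-in so the snippet compiles without ArmNN, and VisitNotYetImplementedOperator is an invented name.

    #include <cstdint>

    // Local stand-in for armnn::IgnoreUnused: a variadic no-op that "uses" its
    // arguments so -Wunused-parameter (an error under -Werror) stays quiet.
    template <typename... Ts>
    void IgnoreUnused(Ts&&...) {}

    // Invented stub in the style of the Visit*Operator functions in the patch.
    int VisitNotYetImplementedOperator(void* delegateData,
                                       void* tfLiteContext,
                                       void* tfLiteNode,
                                       int nodeIndex,
                                       int32_t operatorCode)
    {
        // Every parameter is named but otherwise untouched; passing them through the
        // no-op keeps the signature stable for when the operator is implemented.
        IgnoreUnused(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        return 1; // the real stubs return kTfLiteError
    }

    int main()
    {
        return VisitNotYetImplementedOperator(nullptr, nullptr, nullptr, 0, 0) == 1 ? 0 : 1;
    }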
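
Note on the remaining C++ hunks (DelegateUtils.hpp, armnn_delegate.cpp, Pooling2dTestHelper.hpp): they are almost entirely explicit casts. TfLite reports tensor counts and indices as signed ints, while ArmNN slot counts and standard-library container indices are unsigned, and with -Wconversion and -Wsign-conversion promoted to errors the previously implicit mixes no longer compile. A small self-contained illustration of the pattern follows; the variable names are illustrative, not taken from the delegate sources.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main()
    {
        int tfLiteNumInputs = 2;        // TfLite-style signed size
        unsigned int expectedSize = 2;  // ArmNN-style unsigned count

        // "tfLiteNumInputs != expectedSize" mixes signedness and is rejected once the
        // warning becomes an error; the cast states the intended conversion explicitly.
        if (static_cast<unsigned int>(tfLiteNumInputs) != expectedSize)
        {
            std::printf("unexpected number of inputs\n");
            return 1;
        }

        // Same idea for indexing: std::vector::operator[] takes an unsigned size type,
        // so a signed TfLite tensor id is converted explicitly rather than implicitly.
        std::vector<int> outputSlotForNode(4u, 0);
        int tensorId = 3;
        outputSlotForNode[static_cast<std::size_t>(tensorId)] = 42;

        std::printf("ok\n");
        return 0;
    }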