author    Ryan OShea <ryan.oshea3@arm.com>    2023-01-30 14:24:15 +0000
committer ryan.oshea3 <ryan.oshea3@arm.com>   2023-01-30 15:46:29 +0000
commit    475c7a8796cf0cdf5d22fa9f1962f279914834f6 (patch)
tree      056053d676320f00f43aee0cc0c8214cba78813f
parent    3ad2e14333fa0ffebe373b05ce582068c4c8f5f0 (diff)
download  armnn-475c7a8796cf0cdf5d22fa9f1962f279914834f6.tar.gz
IVGCVSW-7475 Fix uncaught warnings treated as errors in delegate release build
* Add initialization of activation type in delegate layers with fused activations
* Remove unused variable activationOutputInfo on layers with fused activations

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ieb2ba00516a159639871c391069faf162cbb666d
-rw-r--r--  delegate/src/Control.hpp             1
-rw-r--r--  delegate/src/Convolution.hpp        12
-rw-r--r--  delegate/src/DelegateUtils.hpp       2
-rw-r--r--  delegate/src/ElementwiseBinary.hpp   4
-rw-r--r--  delegate/src/FullyConnected.hpp      4
-rw-r--r--  delegate/src/Pooling.hpp             7
6 files changed, 8 insertions(+), 22 deletions(-)
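For context, the class of build failure this commit addresses can be reproduced outside ArmNN: when warnings are promoted to errors, an enum local that is only assigned inside a conditional branch may be rejected as potentially uninitialized, and a local that is declared but never read is rejected outright. The following is a minimal standalone sketch, not ArmNN or TensorFlow Lite code; the TfLite-style type and enum names are local stand-ins, and only the control flow mirrors the delegate sources changed below.

// Minimal standalone sketch of the warning class fixed by this commit.
// Assumption: the TfLite-style names here are local stand-ins, not the real
// TensorFlow Lite headers.
#include <cstdio>

enum TfLiteFusedActivation { kTfLiteActNone, kTfLiteActRelu };
struct TfLiteConvParams { TfLiteFusedActivation activation; };

int main()
{
    TfLiteConvParams params{ kTfLiteActRelu };
    TfLiteConvParams* tfLiteNodeParameters = &params; // may be null at runtime

    // Before the fix: 'TfLiteFusedActivation activationType;' was declared
    // uninitialized and only assigned inside the branch below, so a release
    // build with -Werror can fail on -Wmaybe-uninitialized when the value is
    // read afterwards. The branch also declared a local (activationOutputInfo)
    // that was never used, tripping -Wunused-variable.
    TfLiteFusedActivation activationType = kTfLiteActNone; // the fix: default it
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
    }

    std::printf("activation = %d\n", static_cast<int>(activationType));
    return 0;
}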
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index fd1fdee940..2f83d2a37e 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -126,7 +126,6 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
// Verify we support the fused activation before attempting to create a layer
TfLiteFusedActivation activationType = concatenationParameters->activation;
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 7ea3a3a987..17189027dc 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -102,12 +102,10 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType;
+ TfLiteFusedActivation activationType=kTfLiteActNone;
if (tfLiteNodeParameters)
{
activationType = tfLiteNodeParameters->activation;
-
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)
@@ -279,12 +277,10 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType;
+ TfLiteFusedActivation activationType=kTfLiteActNone;
if (tfLiteNodeParameters)
{
activationType = tfLiteNodeParameters->activation;
-
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)
@@ -490,12 +486,10 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType;
+ TfLiteFusedActivation activationType = kTfLiteActNone;
if (tfLiteNodeParameters)
{
activationType = tfLiteNodeParameters->activation;
-
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 91447576d0..c0bef4f994 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -342,7 +342,7 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
data.m_Backends,
isSupported,
setBackend,
- prevLayer->GetOutputSlot(0).GetTensorInfo(),
+ activationOutputInfo,
activationOutputInfo,
activationDesc);
if (!isSupported)
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index f21d6afc28..8096acfefb 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -255,12 +255,10 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType;
+ TfLiteFusedActivation activationType = kTfLiteActNone;
if (tfLiteNodeParameters)
{
activationType = tfLiteNodeParameters->activation;
-
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index ee553ce81c..337f1153a1 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -59,12 +59,10 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
// Check that we support fused activation before we attempt to create a layer
auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams *>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType;
+ TfLiteFusedActivation activationType=kTfLiteActNone;
if (tfLiteNodeParameters)
{
activationType = tfLiteNodeParameters->activation;
-
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index d0a73b433b..4dc8e0da98 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -50,12 +50,10 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType;
+ TfLiteFusedActivation activationType = kTfLiteActNone;
if (tfLiteNodeParameters)
{
activationType = tfLiteNodeParameters->activation;
-
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)
@@ -233,7 +231,7 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
// Check activation by parsing the string from the flexbuffer map
std::string activationTypeStr = m["activation"].AsString().str();
- TfLiteFusedActivation activationType;
+ TfLiteFusedActivation activationType = kTfLiteActNone;
if (activationTypeStr == "kTfLiteActRelu")
{
@@ -264,7 +262,6 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
activationType = kTfLiteActNone;
}
- const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
outputTensorInfo, activationType);
if(activationStatus != kTfLiteOk)