aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRyan OShea <ryan.oshea3@arm.com>2023-01-13 10:19:20 +0000
committerColm Donelan <colm.donelan@arm.com>2023-01-27 21:03:23 +0000
commit3ad2e14333fa0ffebe373b05ce582068c4c8f5f0 (patch)
treec597684297c84ffb71871d96a2d6c778559074c0
parent3811a97033be66f7a5d8fc3340b0899e0b60f737 (diff)
downloadarmnn-3ad2e14333fa0ffebe373b05ce582068c4c8f5f0.tar.gz
IVGCVSW-7450 Fix delegate fallback when fused activation is unsupported
In layers that support fused activations, we check for activation layer support after we have already created the base layer. This breaks the fallback as we have already added the base layer to the graph. * Creates ValidateFusedActivation shared function * Moves Activation validation higher in the Visit function Signed-off-by: Ryan OShea <ryan.oshea3@arm.com> Change-Id: I239af360923f695fc374ddeaeefa24c062eaf9e8
-rw-r--r--delegate/src/Control.hpp24
-rw-r--r--delegate/src/Convolution.hpp63
-rw-r--r--delegate/src/ElementwiseBinary.hpp21
-rw-r--r--delegate/src/FullyConnected.hpp24
-rw-r--r--delegate/src/Pooling.hpp104
-rw-r--r--delegate/src/SharedFunctions.cpp77
-rw-r--r--delegate/src/SharedFunctions.hpp8
7 files changed, 256 insertions, 65 deletions
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 02426a5616..fd1fdee940 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -99,6 +99,12 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
uint32_t inputRank = tfLiteTensors[tfLiteNode->inputs->data[0]].dims->size;
auto* concatenationParameters = reinterpret_cast<TfLiteConcatenationParams*>(tfLiteNode->builtin_data);
+
+ if(!concatenationParameters)
+ {
+ throw armnn::Exception("TfLiteArmnnDelegate: Concat parameters are null in node: " + std::to_string(nodeIndex));
+ }
+
const unsigned int concatDimInput = static_cast<unsigned int>(
(static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));
@@ -117,6 +123,17 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Verify we support the fused activation before attempting to create a layer
+ TfLiteFusedActivation activationType = concatenationParameters->activation;
+
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Check if supported
bool isSupported = false;
armnn::BackendId setBackend;
@@ -158,14 +175,13 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
Connect(concatenationLayer, tfLiteNode, delegateData);
- if (!concatenationParameters)
+ if (activationType == kTfLiteActNone)
{
// No Activation
return kTfLiteOk;
}
- // Check activation
- TfLiteFusedActivation activationType = concatenationParameters->activation;
+ // Check and Create activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
}
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index e307bb9be3..7ea3a3a987 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -1,11 +1,12 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include "DelegateUtils.hpp"
+#include "SharedFunctions.hpp"
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
@@ -100,6 +101,22 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ }
+
armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
armnn::TensorInfo biasTensorInfo;
@@ -198,14 +215,12 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
Connect(layer, tfLiteNode, delegateData);
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
if (!tfLiteNodeParameters)
{
// No Activation
return kTfLiteOk;
}
- // Check activation
- TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+ // Check and Create activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
@@ -263,6 +278,22 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ }
+
armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
armnn::TensorInfo biasTensorInfo;
@@ -362,15 +393,13 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
Connect(layer, tfLiteNode, delegateData);
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
if (!tfLiteNodeParameters)
{
// No Activation
return kTfLiteOk;
}
- // Check activation
- TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+ // Check and create activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
#endif
@@ -460,6 +489,22 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ }
+
armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
// Assuming input is NHWC
@@ -553,14 +598,12 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
Connect(layer, tfLiteNode, delegateData);
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
if (!tfLiteNodeParameters)
{
// No Activation
return kTfLiteOk;
}
- // Check activation
- TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+ // Check and create activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index caf02624be..f21d6afc28 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -254,6 +254,21 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
if (!delegateData.m_Network)
{
switch(elementwiseBinaryOperatorCode)
@@ -361,14 +376,12 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
return kTfLiteError;
}
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
if (!tfLiteNodeParameters)
{
// No Activation
return kTfLiteOk;
}
- // Check activation
- TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+ // Check and Create Activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
}
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 2243ad0e0c..ee553ce81c 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -57,6 +57,22 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
armnn::TensorInfo weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ // Check that we support fused activation before we attempt to create a layer
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams *>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
// Fully Connected Layer accepts two dimensional weights input
int32_t weightsDimension = static_cast<int32_t>(weightsTensorInfo.GetNumDimensions());
if (weightsDimension != 2)
@@ -221,9 +237,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
{
Connect(layer, tfLiteNode, delegateData);
}
-
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams*>(tfLiteNode->builtin_data);
-
+
if (outputTensorInfo.GetNumDimensions() > 2)
{
layer = AddReshapeLayer(tfLiteContext, tfLiteNode, layer, reshapedOutputTensorInfo, outputTensorInfo,
@@ -244,8 +258,8 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
// No Activation
return kTfLiteOk;
}
- // Check Activation
- TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
+
+ // Check and Create Activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index 824156742d..d0a73b433b 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -49,6 +49,22 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
+ TfLiteFusedActivation activationType;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ }
+
armnn::PoolingAlgorithm poolingAlgorithm;
switch(tfLitePoolingOperatorCode)
{
@@ -68,20 +84,19 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
armnn::Pooling2dDescriptor descriptor;
descriptor.m_PoolType = poolingAlgorithm;
- auto* params = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
- descriptor.m_PoolWidth = params->filter_width;
- descriptor.m_PoolHeight = params->filter_height;
- descriptor.m_StrideX = params->stride_width;
- descriptor.m_StrideY = params->stride_height;
+ descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width;
+ descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height;
+ descriptor.m_StrideX = tfLiteNodeParameters->stride_width;
+ descriptor.m_StrideY = tfLiteNodeParameters->stride_height;
descriptor.m_DataLayout = armnn::DataLayout::NHWC;
unsigned int inputHeight = inputTensorInfo.GetShape()[1];
unsigned int inputWidth = inputTensorInfo.GetShape()[2];
CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
- descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+ descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
- descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+ descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
bool isSupported = false;
armnn::BackendId setBackend;
@@ -112,8 +127,7 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
Connect(poolingLayer, tfLiteNode, delegateData);
- // Check activation
- TfLiteFusedActivation activationType = params->activation;
+ // Check and create activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
}
@@ -216,36 +230,6 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
descriptor.m_PadFront, descriptor.m_PadBack, padding);
- // Validate the output info.
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
- FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
- tfLiteContext,
- IsPooling3dSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Create the Layer
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
- poolingLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(poolingLayer != nullptr);
-
- // Create and set output slots
- armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(poolingLayer, tfLiteNode, delegateData);
// Check activation by parsing the string from the flexbuffer map
std::string activationTypeStr = m["activation"].AsString().str();
@@ -280,6 +264,46 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
activationType = kTfLiteActNone;
}
+ const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+ activationOutputInfo, activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+
+ // Validate the output info.
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
+ FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
+ tfLiteContext,
+ IsPooling3dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Create the Layer
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+ poolingLayer->SetBackendId(setBackend);
+ ARMNN_ASSERT(poolingLayer != nullptr);
+
+ // Create and set output slots
+ armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+ Connect(poolingLayer, tfLiteNode, delegateData);
+
return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
}
diff --git a/delegate/src/SharedFunctions.cpp b/delegate/src/SharedFunctions.cpp
index 22f578a9d7..fef970173e 100644
--- a/delegate/src/SharedFunctions.cpp
+++ b/delegate/src/SharedFunctions.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,5 +37,80 @@ TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ TfLiteFusedActivation activationType)
+{
+ armnn::ActivationDescriptor activationDesc;
+
+ switch (activationType)
+ {
+ case kTfLiteActNone:
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ case kTfLiteActRelu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+ break;
+ }
+// The name of kTfLiteActRelu1 changed after TF Lite v2.3
+#if defined(ARMNN_POST_TFLITE_2_3)
+ case kTfLiteActReluN1To1:
+#else
+ case kTfLiteActRelu1:
+#endif
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = -1.0f;
+ break;
+ }
+ case kTfLiteActRelu6:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 6.0f;
+ activationDesc.m_B = 0.0f;
+ break;
+ }
+ case kTfLiteActSigmoid:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+ break;
+ }
+ case kTfLiteActTanh:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::TanH;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = 1.0f;
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
+ tfLiteContext,
+ IsActivationSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputInfo,
+ outputInfo,
+ activationDesc);
+ };
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+
} // namespace armnnDelegate
diff --git a/delegate/src/SharedFunctions.hpp b/delegate/src/SharedFunctions.hpp
index bf6b603cf9..b03a63ded9 100644
--- a/delegate/src/SharedFunctions.hpp
+++ b/delegate/src/SharedFunctions.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -15,5 +15,11 @@ TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo,
const armnn::TensorInfo& outputTensorInfo);
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ TfLiteFusedActivation activationType);
+
} // namespace armnnDelegate