From ae93173f7b37285ed107d4fa38adbe8669280e25 Mon Sep 17 00:00:00 2001
From: Tianle Cheng <tianle.cheng@arm.com>
Date: Fri, 28 Jul 2023 11:53:04 +0100
Subject: IVGCVSW-7451 LEAKY_RELU not supported by delegate

* Added LEAKY_RELU support to classic and opaque delegate
* CMake files updated
* Test added

Signed-off-by: Tianle Cheng <tianle.cheng@arm.com>
Change-Id: Ib9a2ce8f637b14afcd796bbae11fd3fa03653a2c
---
 delegate/classic/src/Activation.hpp     |  8 ++++++
 delegate/classic/src/armnn_delegate.cpp | 14 ++++++++---
 delegate/opaque/src/Activation.hpp      |  9 +++++++
 delegate/opaque/src/armnn_delegate.cpp  |  6 +++++
 delegate/test/ActivationTest.cpp        | 44 +++++++++++++++++++++++++++++++++
 delegate/test/ActivationTestHelper.hpp  | 24 +++++++++++++++---
 6 files changed, 97 insertions(+), 8 deletions(-)

(limited to 'delegate')

diff --git a/delegate/classic/src/Activation.hpp b/delegate/classic/src/Activation.hpp
index e813956f6f..442ce4fac3 100644
--- a/delegate/classic/src/Activation.hpp
+++ b/delegate/classic/src/Activation.hpp
@@ -101,6 +101,14 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
             activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
             break;
         }
+        case kTfLiteBuiltinLeakyRelu:
+        {
+            // Get the alpha param from builtin data
+            auto* leakyReluParameters = reinterpret_cast<TfLiteLeakyReluParams*>(tfLiteNode->builtin_data);
+            activationDesc.m_Function = armnn::ActivationFunction::LeakyReLu;
+            activationDesc.m_A = leakyReluParameters->alpha;
+            break;
+        }
         default:
         {
             return kTfLiteError;
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 45bea3d442..2483835989 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -741,10 +741,16 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                              kTfLiteBuiltinL2Normalization);
         case kTfLiteBuiltinL2Pool2d:
             return VisitPooling2dOperator(delegateData,
-                                           tfLiteContext,
-                                           tfLiteNode,
-                                           nodeIndex,
-                                           kTfLiteBuiltinL2Pool2d);
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinL2Pool2d);
+        case kTfLiteBuiltinLeakyRelu:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinLeakyRelu);
         case kTfLiteBuiltinLess:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
diff --git a/delegate/opaque/src/Activation.hpp b/delegate/opaque/src/Activation.hpp
index 9fce7a12e0..f56609001a 100644
--- a/delegate/opaque/src/Activation.hpp
+++ b/delegate/opaque/src/Activation.hpp
@@ -166,6 +166,15 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
             activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
             break;
         }
+        case kTfLiteBuiltinLeakyRelu:
+        {
+            // Get alpha param from builtin data
+            auto* leakyReluParameters =
+                reinterpret_cast<TfLiteLeakyReluParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+            activationDesc.m_Function = armnn::ActivationFunction::LeakyReLu;
+            activationDesc.m_A = leakyReluParameters->alpha;
+            break;
+        }
         default:
         {
             return kTfLiteError;
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 49fa30d8f0..60da293eb2 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -828,6 +828,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinL2Pool2d);
+        case kTfLiteBuiltinLeakyRelu:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinLeakyRelu);
         case kTfLiteBuiltinLess:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
diff --git a/delegate/test/ActivationTest.cpp b/delegate/test/ActivationTest.cpp
index 8f2f198f88..620c299803 100644
--- a/delegate/test/ActivationTest.cpp
+++ b/delegate/test/ActivationTest.cpp
@@ -170,6 +170,32 @@ void ActivationHardSwishTest(std::vector<armnn::BackendId>& backends)
                    outputExpectedData);
 }
 
+void ActivationLeakyReLuTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    float alpha = 0.3f;
+
+    // Calculate output values for input.
+    auto f = [alpha](float value)
+    {
+        return value > 0 ? value : value * alpha;
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_LEAKY_RELU,
+                   backends,
+                   inputData,
+                   outputExpectedData,
+                   alpha);
+}
+
 TEST_SUITE("Activation_CpuRefTests")
 {
 
@@ -209,6 +235,12 @@ TEST_CASE ("Activation_HardSwish_CpuRef_Test")
     ActivationHardSwishTest(backends);
 }
 
+TEST_CASE ("Activation_LeakyRelu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationLeakyReLuTest(backends);
+}
+
 }
 
 TEST_SUITE("Activation_CpuAccTests")
@@ -250,6 +282,12 @@ TEST_CASE ("Activation_HardSwish_CpuAcc_Test")
     ActivationHardSwishTest(backends);
 }
 
+TEST_CASE ("Activation_LeakyRelu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationLeakyReLuTest(backends);
+}
+
 }
 
 TEST_SUITE("Activation_GpuAccTests")
@@ -291,6 +329,12 @@ TEST_CASE ("Activation_HardSwish_GpuAcc_Test")
     ActivationHardSwishTest(backends);
 }
 
+TEST_CASE ("Activation_LeakyRelu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationLeakyReLuTest(backends);
+}
+
 }
 
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
index e1901b7d9f..b0a4d6785d 100644
--- a/delegate/test/ActivationTestHelper.hpp
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -23,7 +23,8 @@ namespace
 
 std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
                                               tflite::TensorType tensorType,
-                                              const std::vector<int32_t>& tensorShape)
+                                              const std::vector<int32_t>& tensorShape,
+                                              float alpha = 0)
 {
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
@@ -42,11 +43,24 @@ std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activation
     // create operator
     const std::vector<int32_t> operatorInputs{0};
     const std::vector<int32_t> operatorOutputs{1};
+
+    // builtin options
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOption = 0;
+
+    if (activationOperatorCode == tflite::BuiltinOperator_LEAKY_RELU)
+    {
+        operatorBuiltinOptionsType = tflite::BuiltinOptions_LeakyReluOptions;
+        operatorBuiltinOption = CreateLeakyReluOptions(flatBufferBuilder, alpha).Union();
+    }
+
     flatbuffers::Offset<Operator> unaryOperator =
         CreateOperator(flatBufferBuilder,
                        0,
                        flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOption);
 
     const std::vector<int32_t> subgraphInputs{0};
     const std::vector<int32_t> subgraphOutputs{1};
@@ -78,13 +92,15 @@ std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activation
 void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
                     std::vector<armnn::BackendId>& backends,
                     std::vector<float>& inputValues,
-                    std::vector<float>& expectedOutputValues)
+                    std::vector<float>& expectedOutputValues,
+                    float alpha = 0)
 {
     using namespace delegateTestInterpreter;
     std::vector<int32_t> inputShape { { 4, 1, 4} };
    std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
                                                                 ::tflite::TensorType_FLOAT32,
-                                                                inputShape);
+                                                                inputShape,
+                                                                alpha);
 
     // Setup interpreter with just TFLite Runtime.
     auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
-- 
cgit v1.2.1