diff options
-rw-r--r-- | delegate/classic/src/Activation.hpp | 8 | ||||
-rw-r--r-- | delegate/classic/src/armnn_delegate.cpp | 14 | ||||
-rw-r--r-- | delegate/opaque/src/Activation.hpp | 9 | ||||
-rw-r--r-- | delegate/opaque/src/armnn_delegate.cpp | 6 | ||||
-rw-r--r-- | delegate/test/ActivationTest.cpp | 44 | ||||
-rw-r--r-- | delegate/test/ActivationTestHelper.hpp | 24 |
6 files changed, 97 insertions(+), 8 deletions(-)
diff --git a/delegate/classic/src/Activation.hpp b/delegate/classic/src/Activation.hpp index e813956f6f..442ce4fac3 100644 --- a/delegate/classic/src/Activation.hpp +++ b/delegate/classic/src/Activation.hpp @@ -101,6 +101,14 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData, activationDesc.m_Function = armnn::ActivationFunction::HardSwish; break; } + case kTfLiteBuiltinLeakyRelu: + { + // Get the alpha param from builtin data + auto* leakyReluParameters = reinterpret_cast<TfLiteLeakyReluParams*>(tfLiteNode->builtin_data); + activationDesc.m_Function = armnn::ActivationFunction::LeakyReLu; + activationDesc.m_A = leakyReluParameters->alpha; + break; + } default: { return kTfLiteError; diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp index 45bea3d442..2483835989 100644 --- a/delegate/classic/src/armnn_delegate.cpp +++ b/delegate/classic/src/armnn_delegate.cpp @@ -741,10 +741,16 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData, kTfLiteBuiltinL2Normalization); case kTfLiteBuiltinL2Pool2d: return VisitPooling2dOperator(delegateData, - tfLiteContext, - tfLiteNode, - nodeIndex, - kTfLiteBuiltinL2Pool2d); + tfLiteContext, + tfLiteNode, + nodeIndex, + kTfLiteBuiltinL2Pool2d); + case kTfLiteBuiltinLeakyRelu: + return VisitActivationOperator(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + kTfLiteBuiltinLeakyRelu); case kTfLiteBuiltinLess: return VisitComparisonOperator(delegateData, tfLiteContext, diff --git a/delegate/opaque/src/Activation.hpp b/delegate/opaque/src/Activation.hpp index 9fce7a12e0..f56609001a 100644 --- a/delegate/opaque/src/Activation.hpp +++ b/delegate/opaque/src/Activation.hpp @@ -166,6 +166,15 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData, activationDesc.m_Function = armnn::ActivationFunction::HardSwish; break; } + case kTfLiteBuiltinLeakyRelu: + { + // Get alpha param from builtin data + auto* leakyReluParameters = + 
reinterpret_cast<TfLiteLeakyReluParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode)); + activationDesc.m_Function = armnn::ActivationFunction::LeakyReLu; + activationDesc.m_A = leakyReluParameters->alpha; + break; + } default: { return kTfLiteError; diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp index 49fa30d8f0..60da293eb2 100644 --- a/delegate/opaque/src/armnn_delegate.cpp +++ b/delegate/opaque/src/armnn_delegate.cpp @@ -828,6 +828,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData, tfLiteNode, nodeIndex, kTfLiteBuiltinL2Pool2d); + case kTfLiteBuiltinLeakyRelu: + return VisitActivationOperator(delegateData, + tfLiteContext, + tfLiteNode, + nodeIndex, + kTfLiteBuiltinLeakyRelu); case kTfLiteBuiltinLess: return VisitComparisonOperator(delegateData, tfLiteContext, diff --git a/delegate/test/ActivationTest.cpp b/delegate/test/ActivationTest.cpp index 8f2f198f88..620c299803 100644 --- a/delegate/test/ActivationTest.cpp +++ b/delegate/test/ActivationTest.cpp @@ -170,6 +170,32 @@ void ActivationHardSwishTest(std::vector<armnn::BackendId>& backends) outputExpectedData); } +void ActivationLeakyReLuTest(std::vector<armnn::BackendId>& backends) +{ + std::vector<float> inputData = { + -0.1f, -0.2f, -0.3f, -0.4f, + 0.1f, 0.2f, 0.3f, 0.4f, + -1.0f, -2.0f, -3.0f, -4.0f, + 1.0f, 2.0f, 3.0f, 4.0f + }; + + float alpha = 0.3f; + + // Calculate output values for input. + auto f = [alpha](float value) + { + return value > 0 ? 
value : value * alpha; + }; + std::vector<float> outputExpectedData(inputData.size()); + std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + + ActivationTest(tflite::BuiltinOperator_LEAKY_RELU, + backends, + inputData, + outputExpectedData, + alpha); +} + TEST_SUITE("Activation_CpuRefTests") { @@ -209,6 +235,12 @@ TEST_CASE ("Activation_HardSwish_CpuRef_Test") ActivationHardSwishTest(backends); } +TEST_CASE ("Activation_LeakyRelu_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ActivationLeakyReLuTest(backends); +} + } TEST_SUITE("Activation_CpuAccTests") @@ -250,6 +282,12 @@ TEST_CASE ("Activation_HardSwish_CpuAcc_Test") ActivationHardSwishTest(backends); } +TEST_CASE ("Activation_LeakyRelu_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ActivationLeakyReLuTest(backends); +} + } TEST_SUITE("Activation_GpuAccTests") @@ -291,6 +329,12 @@ TEST_CASE ("Activation_HardSwish_GpuAcc_Test") ActivationHardSwishTest(backends); } +TEST_CASE ("Activation_LeakyRelu_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ActivationLeakyReLuTest(backends); +} + } } // namespace armnnDelegate
\ No newline at end of file diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp index e1901b7d9f..b0a4d6785d 100644 --- a/delegate/test/ActivationTestHelper.hpp +++ b/delegate/test/ActivationTestHelper.hpp @@ -23,7 +23,8 @@ namespace std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode, tflite::TensorType tensorType, - const std::vector <int32_t>& tensorShape) + const std::vector <int32_t>& tensorShape, + float alpha = 0) { using namespace tflite; flatbuffers::FlatBufferBuilder flatBufferBuilder; @@ -42,11 +43,24 @@ std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activation // create operator const std::vector<int> operatorInputs{0}; const std::vector<int> operatorOutputs{1}; + + // builtin options + tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE; + flatbuffers::Offset<void> operatorBuiltinOption = 0; + + if (activationOperatorCode == tflite::BuiltinOperator_LEAKY_RELU) + { + operatorBuiltinOptionsType = tflite::BuiltinOptions_LeakyReluOptions; + operatorBuiltinOption = CreateLeakyReluOptions(flatBufferBuilder, alpha).Union(); + } + flatbuffers::Offset <Operator> unaryOperator = CreateOperator(flatBufferBuilder, 0, flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()), - flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size())); + flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()), + operatorBuiltinOptionsType, + operatorBuiltinOption); const std::vector<int> subgraphInputs{0}; const std::vector<int> subgraphOutputs{1}; @@ -78,13 +92,15 @@ std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activation void ActivationTest(tflite::BuiltinOperator activationOperatorCode, std::vector<armnn::BackendId>& backends, std::vector<float>& inputValues, - std::vector<float>& expectedOutputValues) + std::vector<float>& 
expectedOutputValues, + float alpha = 0) { using namespace delegateTestInterpreter; std::vector<int32_t> inputShape { { 4, 1, 4} }; std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode, ::tflite::TensorType_FLOAT32, - inputShape); + inputShape, + alpha); // Setup interpreter with just TFLite Runtime. auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer); |