From 7515d0730acc926316886a85401bcd36f640627e Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Wed, 16 Dec 2020 12:50:01 +0000
Subject: IVGCVSW-5614 Enable Hard Swish and Elu activations

 * Enabled Hard Swish and Elu in TfLiteDelegate
 * Added support for Elu in TfLiteParser

Signed-off-by: Matthew Sloyan
Change-Id: If09321b58568a98e14cabce610a1586556da041e
---
 delegate/src/test/ActivationTest.cpp       | 98 ++++++++++++++++++++++++++++--
 delegate/src/test/ActivationTestHelper.hpp | 36 ++++-------
 2 files changed, 107 insertions(+), 27 deletions(-)

(limited to 'delegate/src/test')
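Note that this listing is limited to 'delegate/src/test', so only the new unit tests appear below; the TfLiteDelegate and TfLiteParser changes named in the commit message are not part of this excerpt. As rough context for what the new tests exercise, here is a minimal sketch of the kind of mapping the delegate has to perform for the two newly enabled builtin operators. The free function MakeActivationDescriptor is a hypothetical illustration, not code from this commit; only the ArmNN descriptor fields and the TfLite builtin enum values come from the real APIs.

// Illustrative sketch only; not the delegate source touched by this commit.
// It shows a TfLite builtin operator being translated into an ArmNN activation descriptor.
#include <armnn/Descriptors.hpp>          // armnn::ActivationDescriptor, armnn::ActivationFunction
#include <tensorflow/lite/builtin_ops.h>  // kTfLiteBuiltinElu, kTfLiteBuiltinHardSwish
#include <cstdint>
#include <stdexcept>

armnn::ActivationDescriptor MakeActivationDescriptor(int32_t tfLiteBuiltinCode)
{
    armnn::ActivationDescriptor desc;
    switch (tfLiteBuiltinCode)
    {
        case kTfLiteBuiltinElu:
            // TfLite's ELU carries no parameters; ArmNN's Elu takes alpha via m_A,
            // which matches the alpha = 1 reference lambda in ActivationEluTest below.
            desc.m_Function = armnn::ActivationFunction::Elu;
            desc.m_A = 1.0f;
            break;
        case kTfLiteBuiltinHardSwish:
            // hard_swish(x) = x * relu6(x + 3) / 6, as spelled out in ActivationHardSwishTest below.
            desc.m_Function = armnn::ActivationFunction::HardSwish;
            break;
        default:
            throw std::invalid_argument("Activation operator not handled in this sketch");
    }
    return desc;
}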
diff --git a/delegate/src/test/ActivationTest.cpp b/delegate/src/test/ActivationTest.cpp
index f894d67372..69041d77a2 100644
--- a/delegate/src/test/ActivationTest.cpp
+++ b/delegate/src/test/ActivationTest.cpp
@@ -22,7 +22,6 @@ namespace armnnDelegate
 
 void ActivationReLuTest(std::vector<armnn::Compute>& backends)
 {
-
     std::vector<float> inputData = {
         -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
@@ -116,6 +115,64 @@ void ActivationTanHTest(std::vector<armnn::Compute>& backends)
                    outputExpectedData);
 }
 
+void ActivationEluTest(std::vector<armnn::Compute>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+        0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+        1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        if (value < 0)
+        {
+            // alpha * (exp(x) - 1)
+            return 1 * (std::exp(value) - 1);
+        }
+        return value;
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_ELU,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationHardSwishTest(std::vector<armnn::Compute>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+        0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+        1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float x)
+    {
+        // Break down the calculation to help with verification.
+        // hard_swish(x) = x * relu6(x+3) / 6
+        // relu6(x) = min(max(x,0),6)
+        float reLu6_step1 = std::max((x + 3),0.0f);
+        float reLu6Complete = std::min(reLu6_step1, 6.0f);
+        float hardSwish_step1 = x * reLu6Complete;
+        float result = hardSwish_step1 / 6;
+        return result;
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_HARD_SWISH,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
 TEST_SUITE("Activation_CpuRefTests")
 {
 
@@ -137,13 +194,24 @@ TEST_CASE ("Activation_Sigmoid_CpuRef_Test")
     ActivationSigmoidTest(backends);
 }
 
-
 TEST_CASE ("Activation_TanH_CpuRef_Test")
 {
     std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
     ActivationTanHTest(backends);
 }
 
+TEST_CASE ("Activation_Elu_CpuRef_Test")
+{
+    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_CpuRef_Test")
+{
+    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    ActivationHardSwishTest(backends);
+}
+
 }
 
 TEST_SUITE("Activation_CpuAccTests")
 {
 
@@ -167,13 +235,24 @@ TEST_CASE ("Activation_Sigmoid_CpuAcc_Test")
     ActivationSigmoidTest(backends);
 }
 
-
 TEST_CASE ("Activation_TanH_CpuAcc_Test")
 {
     std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
     ActivationTanHTest(backends);
 }
 
+TEST_CASE ("Activation_Elu_CpuAcc_Test")
+{
+    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_CpuAcc_Test")
+{
+    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    ActivationHardSwishTest(backends);
+}
+
 }
 
 TEST_SUITE("Activation_GpuAccTests")
 {
 
@@ -197,13 +276,24 @@ TEST_CASE ("Activation_Sigmoid_GpuAcc_Test")
     ActivationSigmoidTest(backends);
 }
 
-
 TEST_CASE ("Activation_TanH_GpuAcc_Test")
 {
     std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
     ActivationTanHTest(backends);
 }
 
+TEST_CASE ("Activation_Elu_GpuAcc_Test")
+{
+    std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
+    ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_GpuAcc_Test")
+{
+    std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
+    ActivationHardSwishTest(backends);
+}
+
 }
 
 } // namespace armnnDelegate
\ No newline at end of file
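A quick sanity check on the two reference lambdas above (arithmetic worked out here, not taken from the patch): for an input of -1.0f, the ELU reference with alpha = 1 gives exp(-1) - 1 ≈ -0.6321, while the hard swish reference gives -1 * min(max(-1 + 3, 0), 6) / 6 = -2 / 6 ≈ -0.3333. For 4.0f, relu6(4 + 3) saturates at 6, so hard swish returns 4 * 6 / 6 = 4.0f, i.e. it acts as the identity for large positive inputs.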
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
index 2368a36931..0f4d944685 100644
--- a/delegate/src/test/ActivationTestHelper.hpp
+++ b/delegate/src/test/ActivationTestHelper.hpp
@@ -5,6 +5,8 @@
 
 #pragma once
 
+#include "TestUtils.hpp"
+
 #include
 #include
 
@@ -79,7 +81,7 @@ void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
                     std::vector<float>& expectedOutputValues)
 {
     using namespace tflite;
-    const std::vector<int32_t> inputShape { { 4, 1, 4} };
+    std::vector<int32_t> inputShape { { 4, 1, 4} };
 
     std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
                                                                 ::tflite::TensorType_FLOAT32,
                                                                 inputShape);
@@ -108,33 +110,21 @@ void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
     CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
 
     // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+
     // Run EnqueWorkload
     CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
     CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
 
     // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-    for (size_t i = 0; i < inputValues.size(); i++)
-    {
-        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
-        CHECK(tfLiteDelageOutputData[i] == doctest::Approx(armnnDelegateOutputData[i]));
-    }
+    armnnDelegate::CompareOutputData(tfLiteInterpreter,
+                                     armnnDelegateInterpreter,
+                                     inputShape,
+                                     expectedOutputValues);
+
+    tfLiteInterpreter.reset(nullptr);
+    armnnDelegateInterpreter.reset(nullptr);
 }
 
 } // anonymous namespace
\ No newline at end of file
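The second bullet in the commit message, Elu support in the TfLiteParser, is also outside this excerpt. As a hedged illustration of what that enables at the API level, the sketch below loads a TensorFlow Lite model through the public armnnTfLiteParser interface. The file name "model_with_elu.tflite" is a placeholder and the snippet is not code from this commit; it only shows that a model containing an ELU operator can now be imported, optimised and loaded like any other.

// Illustrative only: standard ArmNN parser/runtime usage, shown here because the
// parser-side ELU change itself is not part of this patch excerpt.
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <armnn/ArmNN.hpp>
#include <utility>

int main()
{
    // Parse a .tflite model; with this commit the parser also accepts ELU operators.
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model_with_elu.tflite");

    // Optimise for the reference backend and load it into the runtime.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, { armnn::Compute::CpuRef }, runtime->GetDeviceSpec());

    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}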