about summary refs log tree commit diff
path: root/delegate
diff options
context:
space:
mode:
author: Matthew Sloyan <matthew.sloyan@arm.com> 2020-12-16 12:50:01 +0000
committer: Jim Flynn <jim.flynn@arm.com> 2020-12-17 12:20:58 +0000
commit: 7515d0730acc926316886a85401bcd36f640627e (patch)
tree: 97b312e641e7419b14dd6f7b1686afc6b56e788b /delegate
parent: df82f3abc8acc90acfd56f18744a8cae201a759a (diff)
download: armnn-7515d0730acc926316886a85401bcd36f640627e.tar.gz
IVGCVSW-5614 Enable Hard Swish and Elu activations
* Enabled Hard Swish and Elu in TfLiteDelegate
* Added support for Elu in TfLiteParser

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: If09321b58568a98e14cabce610a1586556da041e
Diffstat (limited to 'delegate')
-rw-r--r--delegate/TensorFlowLiteDelegateSupport.md4
-rw-r--r--delegate/src/Activation.hpp24
-rw-r--r--delegate/src/test/ActivationTest.cpp98
-rw-r--r--delegate/src/test/ActivationTestHelper.hpp36
4 files changed, 125 insertions, 37 deletions
diff --git a/delegate/TensorFlowLiteDelegateSupport.md b/delegate/TensorFlowLiteDelegateSupport.md
index c334018e49..7531834643 100644
--- a/delegate/TensorFlowLiteDelegateSupport.md
+++ b/delegate/TensorFlowLiteDelegateSupport.md
@@ -24,6 +24,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato
* EQUAL
+* ELU
+
* EXP
* FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE
@@ -34,6 +36,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato
* GREATER_OR_EQUAL
+* HARD_SWISH
+
* LESS
* LESS_OR_EQUAL
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index b50854aca5..5f14e2c45c 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -49,21 +49,14 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
{
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
- nodeIndex);
return kTfLiteError;
}
+
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
{
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
- nodeIndex);
return kTfLiteError;
}
@@ -96,6 +89,17 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
activationDesc.m_B = 1.0f;
break;
}
+ case kTfLiteBuiltinElu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Elu;
+ activationDesc.m_A = 1.0f;
+ break;
+ }
+ case kTfLiteBuiltinHardSwish:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
+ break;
+ }
default:
{
return kTfLiteError;
diff --git a/delegate/src/test/ActivationTest.cpp b/delegate/src/test/ActivationTest.cpp
index f894d67372..69041d77a2 100644
--- a/delegate/src/test/ActivationTest.cpp
+++ b/delegate/src/test/ActivationTest.cpp
@@ -22,7 +22,6 @@ namespace armnnDelegate
void ActivationReLuTest(std::vector<armnn::BackendId>& backends)
{
-
std::vector<float> inputData = {
-0.1f, -0.2f, -0.3f, -0.4f,
0.1f, 0.2f, 0.3f, 0.4f,
@@ -116,6 +115,64 @@ void ActivationTanHTest(std::vector<armnn::BackendId>& backends)
outputExpectedData);
}
+void ActivationEluTest(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ // Calculate output values for input.
+ auto f = [](float value)
+ {
+ if (value < 0)
+ {
+ // alpha * (exp(x) - 1)
+ return 1 * (std::exp(value) - 1);
+ }
+ return value;
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ ActivationTest(tflite::BuiltinOperator_ELU,
+ backends,
+ inputData,
+ outputExpectedData);
+}
+
+void ActivationHardSwishTest(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ // Calculate output values for input.
+ auto f = [](float x)
+ {
+ // Break down the calculation to help with verification.
+ // hard_swish(x) = x * relu6(x+3) / 6
+ // relu6(x) = min(max(x,0),6)
+ float reLu6_step1 = std::max((x + 3),0.0f);
+ float reLu6Complete = std::min(reLu6_step1, 6.0f);
+ float hardSwish_step1 = x * reLu6Complete;
+ float result = hardSwish_step1 / 6;
+ return result;
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ ActivationTest(tflite::BuiltinOperator_HARD_SWISH,
+ backends,
+ inputData,
+ outputExpectedData);
+}
+
TEST_SUITE("Activation_CpuRefTests")
{
@@ -137,13 +194,24 @@ TEST_CASE ("Activation_Sigmoid_CpuRef_Test")
ActivationSigmoidTest(backends);
}
-
TEST_CASE ("Activation_TanH_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
ActivationTanHTest(backends);
}
+TEST_CASE ("Activation_Elu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ActivationHardSwishTest(backends);
+}
+
}
TEST_SUITE("Activation_CpuAccTests")
@@ -167,13 +235,24 @@ TEST_CASE ("Activation_Sigmoid_CpuAcc_Test")
ActivationSigmoidTest(backends);
}
-
TEST_CASE ("Activation_TanH_CpuAcc_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
ActivationTanHTest(backends);
}
+TEST_CASE ("Activation_Elu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ActivationHardSwishTest(backends);
+}
+
}
TEST_SUITE("Activation_GpuAccTests")
@@ -197,13 +276,24 @@ TEST_CASE ("Activation_Sigmoid_GpuAcc_Test")
ActivationSigmoidTest(backends);
}
-
TEST_CASE ("Activation_TanH_GpuAcc_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
ActivationTanHTest(backends);
}
+TEST_CASE ("Activation_Elu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ActivationHardSwishTest(backends);
+}
+
}
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
index 2368a36931..0f4d944685 100644
--- a/delegate/src/test/ActivationTestHelper.hpp
+++ b/delegate/src/test/ActivationTestHelper.hpp
@@ -5,6 +5,8 @@
#pragma once
+#include "TestUtils.hpp"
+
#include <armnn_delegate.hpp>
#include <flatbuffers/flatbuffers.h>
@@ -79,7 +81,7 @@ void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
std::vector<float>& expectedOutputValues)
{
using namespace tflite;
- const std::vector<int32_t> inputShape { { 4, 1, 4} };
+ std::vector<int32_t> inputShape { { 4, 1, 4} };
std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
::tflite::TensorType_FLOAT32,
inputShape);
@@ -108,33 +110,21 @@ void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
// Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
+ armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputValues);
+ armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputValues);
+
// Run EnqueWorkload
CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
// Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
- for (size_t i = 0; i < inputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
- CHECK(tfLiteDelageOutputData[i] == doctest::Approx(armnnDelegateOutputData[i]));
- }
+ armnnDelegate::CompareOutputData<float>(tfLiteInterpreter,
+ armnnDelegateInterpreter,
+ inputShape,
+ expectedOutputValues);
+
+ tfLiteInterpreter.reset(nullptr);
+ armnnDelegateInterpreter.reset(nullptr);
}
} // anonymous namespace \ No newline at end of file