From 0cf84423f440aa2cad4b3e5f678d7a5f5b865eb4 Mon Sep 17 00:00:00 2001
From: David Monahan <david.monahan@arm.com>
Date: Mon, 16 Nov 2020 15:53:03 +0000
Subject: IVGCVSW-5382 TfLiteDelegate: Implement the Activation operators

* Added TfLiteDelegate implementations for the ReLu, Relu6, Logistic
  and TanH activation functions

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: Id021b4ec9c10fd4357535fe2a665f32c053dad61
---
 delegate/CMakeLists.txt                    |   2 +
 delegate/src/Activation.hpp                | 104 ++++++++++++--
 delegate/src/test/ActivationTest.cpp       | 209 +++++++++++++++++++++++++++++
 delegate/src/test/ActivationTestHelper.hpp | 140 +++++++++++++++++++
 4 files changed, 447 insertions(+), 8 deletions(-)
 create mode 100644 delegate/src/test/ActivationTest.cpp
 create mode 100644 delegate/src/test/ActivationTestHelper.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 1ea0cdd644..3c77dcf2f3 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -104,6 +104,8 @@ option(BUILD_UNIT_TESTS "Build unit tests" ON)
 if(BUILD_UNIT_TESTS)
     set(armnnDelegate_unittest_sources)
     list(APPEND armnnDelegate_unittest_sources
+        src/test/ActivationTest.cpp
+        src/test/ActivationTestHelper.hpp
         src/test/ArmnnDelegateTest.cpp
         src/test/ComparisonTest.cpp
         src/test/ComparisonTestHelper.hpp
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 1ffa7d7f8c..5e8d876110 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -5,7 +5,7 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
+#include "DelegateUtils.hpp"
 
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
@@ -15,20 +15,108 @@
 namespace armnnDelegate
 {
 
+TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
+                                        TfLiteContext* tfLiteContext,
+                                        const armnn::TensorInfo& inputInfo,
+                                        const armnn::TensorInfo& outputInfo,
+                                        armnn::ActivationDescriptor& activationDesc)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsActivationSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outInfo,
+                                   activationDesc);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
 TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
-                                     int32_t comparisonOperatorCode)
+                                     int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        comparisonOperatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::ActivationDescriptor activationDesc;
+    switch(operatorCode)
+    {
+        case kTfLiteBuiltinRelu:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+            break;
+        }
+        case kTfLiteBuiltinRelu6:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+            activationDesc.m_A = 6.0f;
+            break;
+        }
+        case kTfLiteBuiltinLogistic:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+            break;
+        }
+        case kTfLiteBuiltinTanh:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::TanH;
+            activationDesc.m_A = 1.0f;
+            activationDesc.m_B = 1.0f;
+            break;
+        }
+        default:
+        {
+            return kTfLiteError;
+        }
+    }
+
+    if (!delegateData.m_Network)
+    {
+        return ValidateActivationOperator(delegateData,
+                                          tfLiteContext,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          activationDesc);
+    }
+
+    armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
+    ARMNN_ASSERT(activationLayer != nullptr);
 
+    armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
-    return kTfLiteError;
+
+    // Connect
+    return Connect(activationLayer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/ActivationTest.cpp b/delegate/src/test/ActivationTest.cpp
new file mode 100644
index 0000000000..f894d67372
--- /dev/null
+++ b/delegate/src/test/ActivationTest.cpp
@@ -0,0 +1,209 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ActivationTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ActivationReLuTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return std::fmax(0.0f, value);
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_RELU,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationBoundedReluTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    const float a = 6.0f;
+    const float b = 0.0f;
+    // Calculate output values for input.
+    auto f = [a, b](float value)
+    {
+        return std::min(a, std::max(b, value));
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_RELU6,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationSigmoidTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return 1.0f / (1.0f + std::exp(-value));
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_LOGISTIC,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationTanHTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return tanhf(value);
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_TANH,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+TEST_SUITE("Activation_CpuRefTests")
+{
+
+TEST_CASE ("Activation_ReLu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationTanHTest(backends);
+}
+
+}
+
+TEST_SUITE("Activation_CpuAccTests")
+{
+
+TEST_CASE ("Activation_ReLu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationTanHTest(backends);
+}
+
+}
+
+TEST_SUITE("Activation_GpuAccTests")
+{
+
+TEST_CASE ("Activation_ReLu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationTanHTest(backends);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
new file mode 100644
index 0000000000..04da3ccc49
--- /dev/null
+++ b/delegate/src/test/ActivationTestHelper.hpp
@@ -0,0 +1,140 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector<int32_t>& tensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+
+    // create operator
+    const std::vector<int32_t> operatorInputs{{0}};
+    const std::vector<int32_t> operatorOutputs{{1}};
+    flatbuffers::Offset<Operator> unaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+
+    const std::vector<int32_t> subgraphInputs{{0}};
+    const std::vector<int32_t> subgraphOutputs{{1}};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&unaryOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Activation Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode =
+        CreateOperatorCode(flatBufferBuilder, activationOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<float>& inputValues,
+                    std::vector<float>& expectedOutputValues)
+{
+    using namespace tflite;
+    const std::vector<int32_t> inputShape { 4, 1, 4 };
+    std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
+                                                                ::tflite::TensorType_FLOAT32,
+                                                                inputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel,
+                             ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    for (size_t i = 0; i < inputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
+        CHECK(tfLiteDelegateOutputData[i] == doctest::Approx(armnnDelegateOutputData[i]));
+    }
+}
+
+} // anonymous namespace
\ No newline at end of file
-- 
cgit v1.2.1
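
Usage note: the test helper above validates the delegate by running the same
model through a plain TfLite interpreter and through one with the Arm NN
delegate attached. An application follows the same flow. The sketch below is
illustrative only, assuming just the delegate API exercised in this patch
(DelegateOptions, TfLiteArmnnDelegateCreate, TfLiteArmnnDelegateDelete);
"model.tflite" and the CpuAcc backend choice are placeholders.

// Minimal sketch: offload a TfLite model (including the activation
// operators implemented in this patch) to Arm NN.
#include <armnn_delegate.hpp>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <memory>
#include <vector>

int main()
{
    // Load a model containing e.g. RELU, RELU6, LOGISTIC or TANH nodes.
    auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);

    // Create the Arm NN delegate for the preferred backends
    // (CpuRef, CpuAcc or GpuAcc, as in the test suites above).
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);

    // Supported nodes, including the new activations, now run through Arm NN.
    interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
    interpreter->AllocateTensors();
    interpreter->Invoke();
    return 0;
}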