author     David Monahan <david.monahan@arm.com>    2020-11-16 15:53:03 +0000
committer  David Monahan <david.monahan@arm.com>    2020-11-17 13:43:54 +0000
commit     0cf84423f440aa2cad4b3e5f678d7a5f5b865eb4 (patch)
tree       7230e40703a49a2edefe68938e5541ef89bcc011 /delegate/src/test
parent     05e9fd2f00fcd17b7103c02be06e305076739e15 (diff)
download   armnn-0cf84423f440aa2cad4b3e5f678d7a5f5b865eb4.tar.gz
IVGCVSW-5382 TfLiteDelegate: Implement the Activation operators
* Added TfLiteDelegate implementations for the ReLu, Relu6, Logistic, and TanH activation functions
Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: Id021b4ec9c10fd4357535fe2a665f32c053dad61
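
For reference, the four operators covered by this change are pure elementwise activations; the expected outputs in the tests below are computed on the host from exactly these definitions. A minimal standalone sketch mirroring the test lambdas (illustration only, not part of the commit):

// Reference definitions of the four activations under test (sketch only).
#include <algorithm>
#include <cmath>

float relu(float x)     { return std::fmax(0.0f, x); }                  // max(0, x)
float relu6(float x)    { return std::min(6.0f, std::fmax(0.0f, x)); }  // min(6, max(0, x))
float logistic(float x) { return 1.0f / (1.0f + std::exp(-x)); }        // sigmoid
float tanh_ref(float x) { return std::tanh(x); }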
Diffstat (limited to 'delegate/src/test')
-rw-r--r--   delegate/src/test/ActivationTest.cpp         209
-rw-r--r--   delegate/src/test/ActivationTestHelper.hpp   140
2 files changed, 349 insertions(+), 0 deletions(-)
diff --git a/delegate/src/test/ActivationTest.cpp b/delegate/src/test/ActivationTest.cpp
new file mode 100644
index 0000000000..f894d67372
--- /dev/null
+++ b/delegate/src/test/ActivationTest.cpp
@@ -0,0 +1,209 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ActivationTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <algorithm> // std::transform, std::min, std::max
+#include <cmath>     // std::fmax, std::exp, std::tanh
+
+namespace armnnDelegate
+{
+
+void ActivationReLuTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate expected output values for the input: relu(x) = max(0, x).
+    auto f = [](float value)
+    {
+        return std::fmax(0.0f, value);
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_RELU,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationBoundedReluTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    const float a = 6.0f;
+    const float b = 0.0f;
+    // Calculate expected output values for the input: relu6(x) = min(6, max(0, x)).
+    auto f = [a, b](float value)
+    {
+        return std::min(a, std::max(b, value));
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_RELU6,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationSigmoidTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate expected output values for the input: sigmoid(x) = 1 / (1 + e^-x).
+    auto f = [](float value)
+    {
+        return 1.0f / (1.0f + std::exp(-value));
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_LOGISTIC,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationTanHTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+         0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+         1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate expected output values for the input.
+    auto f = [](float value)
+    {
+        return std::tanh(value);
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_TANH,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+TEST_SUITE("Activation_CpuRefTests")
+{
+
+TEST_CASE ("Activation_ReLu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationTanHTest(backends);
+}
+
+}
+
+TEST_SUITE("Activation_CpuAccTests")
+{
+
+TEST_CASE ("Activation_ReLu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationTanHTest(backends);
+}
+
+}
+
+TEST_SUITE("Activation_GpuAccTests")
+{
+
+TEST_CASE ("Activation_ReLu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationTanHTest(backends);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
new file mode 100644
index 0000000000..04da3ccc49
--- /dev/null
+++ b/delegate/src/test/ActivationTestHelper.hpp
@@ -0,0 +1,140 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <array>
+
+namespace
+{
+
+std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector<int32_t>& tensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+
+    // Create the operator: tensor 0 is the single input, tensor 1 the single output.
+    const std::vector<int> operatorInputs{{0}};
+    const std::vector<int> operatorOutputs{{1}};
+    flatbuffers::Offset<Operator> unaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+
+    const std::vector<int> subgraphInputs{{0}};
+    const std::vector<int> subgraphOutputs{{1}};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&unaryOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Activation Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, activationOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<float>& inputValues,
+                    std::vector<float>& expectedOutputValues)
+{
+    using namespace tflite;
+    const std::vector<int32_t> inputShape { { 4, 1, 4 } };
+    std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
+                                                                ::tflite::TensorType_FLOAT32,
+                                                                inputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create the TfLite Interpreters.
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate.
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use the ArmNN delegate.
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data.
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run the inference (EnqueueWorkload).
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data.
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    for (size_t i = 0; i < inputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
+        CHECK(tfLiteDelegateOutputData[i] == doctest::Approx(armnnDelegateOutputData[i]));
+    }
+}
+
+} // anonymous namespace
\ No newline at end of file
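
Extending coverage to another activation would follow the same pattern: compute expected outputs on the host with a reference lambda, then hand both vectors to ActivationTest. A hypothetical sketch for hard-swish follows; BuiltinOperator_HARD_SWISH is not implemented by this commit and is shown only to illustrate the helper's contract, which expects 16 input values to match the hard-coded { 4, 1, 4 } shape:

// Hypothetical example only: assumes HARD_SWISH support in the ArmNN delegate.
void ActivationHardSwishTest(std::vector<armnn::BackendId>& backends)
{
    // 16 values to match the helper's fixed { 4, 1, 4 } input shape.
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    // Hard-swish reference: x * relu6(x + 3) / 6.
    auto f = [](float value)
    {
        return value * std::min(6.0f, std::max(0.0f, value + 3.0f)) / 6.0f;
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    ActivationTest(tflite::BuiltinOperator_HARD_SWISH,
                   backends,
                   inputData,
                   outputExpectedData);
}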