From a8578103d1fe621f97ff2cfd842a8e33c1b652c8 Mon Sep 17 00:00:00 2001
From: James Ward <james.ward@arm.com>
Date: Fri, 13 Nov 2020 18:05:04 +0000
Subject: IVGCVSW-5395 TfLiteDelegate: Implement the Softmax operators

Signed-off-by: James Ward <james.ward@arm.com>
Change-Id: I9f098c6b62ebb08e727aa8547e08bddc0b814705
---
 delegate/src/test/SoftmaxTest.cpp       | 129 ++++++++++++++++++++++++
 delegate/src/test/SoftmaxTestHelper.hpp | 170 ++++++++++++++++++++++++++++++++
 2 files changed, 299 insertions(+)
 create mode 100644 delegate/src/test/SoftmaxTest.cpp
 create mode 100644 delegate/src/test/SoftmaxTestHelper.hpp

diff --git a/delegate/src/test/SoftmaxTest.cpp b/delegate/src/test/SoftmaxTest.cpp
new file mode 100644
index 0000000000..3aacfe0a04
--- /dev/null
+++ b/delegate/src/test/SoftmaxTest.cpp
@@ -0,0 +1,129 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SoftmaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+/// Convenience function to run softmax and log-softmax test cases
+/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
+/// \param backends ArmNN backends to target
+/// \param beta multiplicative parameter to the softmax function
+/// \param expectedOutput the values expected after transforming the input
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
+                     std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
+    std::vector<float> input = {
+        1.0, 2.5, 3.0, 4.5, 5.0,
+        -1.0, -2.5, -3.0, -4.5, -5.0};
+    std::vector<int32_t> shape = {2, 5};
+
+    SoftmaxTest(operatorCode,
+                tflite::TensorType_FLOAT32,
+                backends,
+                shape,
+                input,
+                expectedOutput,
+                beta);
+}
+
+TEST_SUITE ("Softmax_GpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput = {0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+                                         0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_GpuAccTests")
+
+TEST_SUITE ("Softmax_CpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput = {
+        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+        0.352414012, 0.224709094, 0.193408906,
+        0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuAccTests")
+
+TEST_SUITE ("Softmax_CpuRefTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput = {
+        0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+        0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput = {
+        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+        0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuRefTests")
+} // namespace armnnDelegate

diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
new file mode 100644
index 0000000000..0474561a93
--- /dev/null
+++ b/delegate/src/test/SoftmaxTestHelper.hpp
@@ -0,0 +1,170 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+#include <armnnUtils/FloatingPointComparison.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
+                                           tflite::TensorType tensorType,
+                                           const std::vector<int32_t>& tensorShape,
+                                           float beta)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector<uint8_t>({})));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> softmaxOperator;
+    flatbuffers::Offset<flatbuffers::String> modelDescription;
+    flatbuffers::Offset<OperatorCode> operatorCode;
+
+    switch (softmaxOperatorCode)
+    {
+        case tflite::BuiltinOperator_SOFTMAX:
+            softmaxOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_SoftmaxOptions,
+                               CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_SOFTMAX);
+            break;
+        case tflite::BuiltinOperator_LOG_SOFTMAX:
+            softmaxOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_LogSoftmaxOptions,
+                               CreateLogSoftmaxOptions(flatBufferBuilder).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_LOG_SOFTMAX);
+            break;
+        default:
+            break;
+    }
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&softmaxOperator, 1));
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
+                 tflite::TensorType tensorType,
+                 std::vector<armnn::BackendId>& backends,
+                 std::vector<int32_t>& shape,
+                 std::vector<float>& inputValues,
+                 std::vector<float>& expectedOutputValues,
+                 float beta = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
+                                                             tensorType,
+                                                             shape,
+                                                             beta);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
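+    // (Two interpreters are built from the same FlatBuffer model: the first
+    // is modified below to execute through the ArmNN delegate, while the
+    // second runs the built-in TfLite reference kernels, so the delegate
+    // output can be checked both against the hard-coded expected values and
+    // against the reference implementation.)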
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteInterpreterInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < inputValues.size(); ++i)
+    {
+        CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 1e-5));
+        CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
+                                                      armnnDelegateOutputData[i], 1e-5));
+    }
+}
+
+} // anonymous namespace
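The expected outputs hard-coded above follow directly from the softmax definition with a
multiplicative beta, y_i = exp(beta * x_i) / sum_j exp(beta * x_j), applied per row of the
2x5 input; LOG_SOFTMAX is simply its logarithm and carries no beta (the beta = 0 passed in
those test cases is ignored, since LogSoftmaxOptions has no beta field). The following is a
minimal standalone sketch, separate from the patch and not ArmNN or TfLite code (the Softmax
helper name is illustrative), that recomputes these reference values:

// Recomputes the reference values used by the tests above.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Row-wise softmax with a multiplicative beta:
//   y_i = exp(beta * x_i) / sum_j exp(beta * x_j)
std::vector<float> Softmax(const std::vector<float>& row, float beta)
{
    std::vector<float> out(row.size());
    float sum = 0.0f;
    for (std::size_t i = 0; i < row.size(); ++i)
    {
        out[i] = std::exp(beta * row[i]);
        sum += out[i];
    }
    for (float& v : out)
    {
        v /= sum;
    }
    return out;
}

int main()
{
    // The 2x5 input used by SoftmaxTestCase; softmax is applied per row.
    const std::vector<std::vector<float>> rows = {
        {  1.0f,  2.5f,  3.0f,  4.5f,  5.0f },
        { -1.0f, -2.5f, -3.0f, -4.5f, -5.0f }};

    // beta = 1.0f reproduces the Standard_Beta expected values,
    // beta = 0.3f the Different_Beta ones.
    for (float beta : { 1.0f, 0.3f })
    {
        for (const auto& row : rows)
        {
            for (float v : Softmax(row, beta))
            {
                std::printf("%.9g ", v);
            }
            std::printf("\n");
        }
    }

    // Log-softmax is log(softmax(x)); TfLite's LOG_SOFTMAX takes no beta.
    for (const auto& row : rows)
    {
        for (float v : Softmax(row, 1.0f))
        {
            std::printf("%.9g ", std::log(v));
        }
        std::printf("\n");
    }
    return 0;
}

For example, the first row with beta = 1 gives exp(1) / (exp(1) + exp(2.5) + exp(3) +
exp(4.5) + exp(5)) = 2.71828 / 273.417 = 0.00994190, matching the first Standard_Beta
value, and its log-softmax is 1 - log(273.417) = -4.61100, matching the Log_Softmax cases.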