From a8578103d1fe621f97ff2cfd842a8e33c1b652c8 Mon Sep 17 00:00:00 2001
From: James Ward
Date: Fri, 13 Nov 2020 18:05:04 +0000
Subject: IVGCVSW-5395 TfLiteDelegate: Implement the Softmax operators

Signed-off-by: James Ward
Change-Id: I9f098c6b62ebb08e727aa8547e08bddc0b814705
---
 delegate/CMakeLists.txt                 |   6 +-
 delegate/src/Softmax.hpp                | 128 ++++++++++++++++++++++--
 delegate/src/test/SoftmaxTest.cpp       | 129 ++++++++++++++++++++++++
 delegate/src/test/SoftmaxTestHelper.hpp | 170 ++++++++++++++++++++++++++++++++
 4 files changed, 424 insertions(+), 9 deletions(-)
 create mode 100644 delegate/src/test/SoftmaxTest.cpp
 create mode 100644 delegate/src/test/SoftmaxTestHelper.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 3c77dcf2f3..595784f37a 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -126,9 +126,11 @@ if(BUILD_UNIT_TESTS)
         src/test/QuantizationTestHelper.hpp
         src/test/ResizeTest.cpp
         src/test/ResizeTestHelper.hpp
+        src/test/SoftmaxTest.cpp
+        src/test/SoftmaxTestHelper.hpp
+        src/test/TestUtils.hpp
         src/test/TransposeTest.cpp
-        src/test/TransposeTestHelper.hpp
-        src/test/TestUtils.hpp)
+        src/test/TransposeTestHelper.hpp)
 
     add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
     target_include_directories(DelegateUnitTests PRIVATE third-party)
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index ddadbc73c8..0de8e1438c 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -5,7 +5,7 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
+#include "DelegateUtils.hpp"
 
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
@@ -15,19 +15,133 @@
 namespace armnnDelegate
 {
 
+TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
+                                     TfLiteContext* tfLiteContext,
+                                     const armnn::TensorInfo& inputInfo,
+                                     const armnn::TensorInfo& outputTensorInfo,
+                                     const armnn::SoftmaxDescriptor& descriptor)
+{
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               tfLiteContext,
+                               IsSoftmaxSupported,
+                               delegateData.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputTensorInfo,
+                               descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+
+TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
+                                        TfLiteContext* tfLiteContext,
+                                        const armnn::TensorInfo& inputInfo,
+                                        const armnn::TensorInfo& outputTensorInfo,
+                                        const armnn::LogSoftmaxDescriptor& descriptor)
+{
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               tfLiteContext,
+                               IsLogSoftmaxSupported,
+                               delegateData.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputTensorInfo,
+                               descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
 TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
                                   int32_t softmaxOperatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        softmaxOperatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+
+    if (!delegateData.m_Network)
+    {
+        switch(softmaxOperatorCode)
+        {
+            case kTfLiteBuiltinSoftmax:
+            {
+                armnn::SoftmaxDescriptor descriptor;
+                auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
+                descriptor.m_Beta = params->beta;
+                return ValidateSoftmaxOperator(delegateData,
+                                               tfLiteContext,
+                                               inputTensorInfo,
+                                               outputTensorInfo,
+                                               descriptor);
+            }
+            case kTfLiteBuiltinLogSoftmax:
+            {
+                armnn::LogSoftmaxDescriptor descriptor;
+                return ValidateLogSoftmaxOperator(delegateData,
+                                                  tfLiteContext,
+                                                  inputTensorInfo,
+                                                  outputTensorInfo,
+                                                  descriptor);
+            }
+            default:
+                return kTfLiteError;
+        }
+    }
+
+    armnn::IConnectableLayer* softmaxLayer = nullptr;
+
+    switch(softmaxOperatorCode)
+    {
+        case kTfLiteBuiltinSoftmax:
+        {
+            armnn::SoftmaxDescriptor descriptor;
+            auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
+            descriptor.m_Beta = params->beta;
+            softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+            break;
+        }
+        case kTfLiteBuiltinLogSoftmax:
+        {
+            armnn::LogSoftmaxDescriptor descriptor;
+            softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+            break;
+        }
+        default:
+            return kTfLiteError;
+    }
+    ARMNN_ASSERT(softmaxLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
 
-    return kTfLiteError;
+    // Connect
+    return Connect(softmaxLayer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/SoftmaxTest.cpp b/delegate/src/test/SoftmaxTest.cpp
new file mode 100644
index 0000000000..3aacfe0a04
--- /dev/null
+++ b/delegate/src/test/SoftmaxTest.cpp
@@ -0,0 +1,129 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SoftmaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+/// Convenience function to run softmax and log-softmax test cases
+/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
+/// \param backends armnn backends to target
+/// \param beta multiplicative parameter to the softmax function
+/// \param expectedOutput to be checked against transformed input
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
+                     std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
+    std::vector<float> input = {
+        1.0, 2.5, 3.0, 4.5, 5.0,
+        -1.0, -2.5, -3.0, -4.5, -5.0};
+    std::vector<int32_t> shape = {2, 5};
+
+    SoftmaxTest(operatorCode,
+                tflite::TensorType_FLOAT32,
+                backends,
+                shape,
+                input,
+                expectedOutput,
+                beta);
+}
+
+TEST_SUITE ("Softmax_GpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput = {0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+                                         0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+
+}
+
+TEST_CASE ("Log_Softmax_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_GpuAccTests")
+
+TEST_SUITE ("Softmax_CpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput = {
+        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+        0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuAccTests")
+
+TEST_SUITE ("Softmax_CpuRefTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput = {
+        0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+        0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput = {
+        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+        0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuRefTests")
+} // namespace armnnDelegate
diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
new file mode 100644
index 0000000000..0474561a93
--- /dev/null
+++ b/delegate/src/test/SoftmaxTestHelper.hpp
@@ -0,0 +1,170 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+#include <armnnUtils/FloatingPointComparison.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
+                                           tflite::TensorType tensorType,
+                                           const std::vector <int32_t>& tensorShape,
+                                           float beta)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector<uint8_t>({})));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> softmaxOperator;
+    flatbuffers::Offset<flatbuffers::String> modelDescription;
+    flatbuffers::Offset<OperatorCode> operatorCode;
+
+    switch (softmaxOperatorCode)
+    {
+        case tflite::BuiltinOperator_SOFTMAX:
+            softmaxOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_SoftmaxOptions,
+                               CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_SOFTMAX);
+            break;
+        case tflite::BuiltinOperator_LOG_SOFTMAX:
+            softmaxOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_LogSoftmaxOptions,
+                               CreateLogSoftmaxOptions(flatBufferBuilder).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_LOG_SOFTMAX);
+            break;
+        default:
+            break;
+    }
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&softmaxOperator, 1));
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
+                 tflite::TensorType tensorType,
+                 std::vector<armnn::BackendId>& backends,
+                 std::vector<int32_t>& shape,
+                 std::vector<float>& inputValues,
+                 std::vector<float>& expectedOutputValues,
+                 float beta = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
+                                                             tensorType,
+                                                             shape,
+                                                             beta);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteInterpreterInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < inputValues.size(); ++i)
+    {
+        CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 1e-5));
+        CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
+                                                      armnnDelegateOutputData[i], 1e-5));
+    }
+}
+
+} // anonymous namespace
-- 
cgit v1.2.1
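
Usage reference (not part of the patch): the sketch below shows how an application could route a softmax model through the delegate support added above. The armnnDelegate::* calls and the ModifyGraphWithDelegate flow mirror SoftmaxTestHelper.hpp; the "softmax.tflite" path and the main() scaffolding are illustrative assumptions.

// Minimal sketch, assuming a local "softmax.tflite" model containing a
// SOFTMAX or LOG_SOFTMAX operator. The delegate and interpreter calls are
// the same ones exercised by the unit tests in this patch.
#include <armnn_delegate.hpp>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <memory>
#include <vector>

int main()
{
    // Load the model (hypothetical path).
    auto model = tflite::FlatBufferModel::BuildFromFile("softmax.tflite");
    if (model == nullptr)
    {
        return 1;
    }

    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk)
    {
        return 1;
    }

    // Prefer the Neon backend, falling back to the reference backend.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        delegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                 armnnDelegate::TfLiteArmnnDelegateDelete);

    // Softmax/log-softmax nodes the delegate reports as supported
    // (via Validate*SoftmaxOperator above) now execute inside Arm NN.
    if (interpreter->ModifyGraphWithDelegate(delegate.get()) != kTfLiteOk ||
        interpreter->AllocateTensors() != kTfLiteOk)
    {
        return 1;
    }
    return interpreter->Invoke() == kTfLiteOk ? 0 : 1;
}

Here the delegate is applied before AllocateTensors so tensors are allocated once for the partitioned graph; the test helper instead allocates first and lets ModifyGraphWithDelegate trigger the reallocation. Both orders are valid.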