author     James Ward <james.ward@arm.com>  2020-11-13 18:05:04 +0000
committer  Jim Flynn <jim.flynn@arm.com>    2020-11-17 15:36:19 +0000
commit     a8578103d1fe621f97ff2cfd842a8e33c1b652c8 (patch)
tree       071986839c5d6aa865fdd4212a83c9c684ef5231
parent     0cf84423f440aa2cad4b3e5f678d7a5f5b865eb4 (diff)
download   armnn-a8578103d1fe621f97ff2cfd842a8e33c1b652c8.tar.gz
IVGCVSW-5395 TfLiteDelegate: Implement the Softmax operators
Signed-off-by: James Ward <james.ward@arm.com>
Change-Id: I9f098c6b62ebb08e727aa8547e08bddc0b814705
-rw-r--r--  delegate/CMakeLists.txt                    6
-rw-r--r--  delegate/src/Softmax.hpp                 128
-rw-r--r--  delegate/src/test/SoftmaxTest.cpp        129
-rw-r--r--  delegate/src/test/SoftmaxTestHelper.hpp  170
4 files changed, 424 insertions(+), 9 deletions(-)
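An application picks up the new operator support the same way the tests below do: build the delegate for a set of backends and attach it to a TfLite interpreter. A minimal sketch, assuming an already-built tflite::Interpreter; the AttachArmnnDelegate wrapper and its name are illustrative, not part of this patch, but the calls mirror SoftmaxTestHelper.hpp below:

#include <armnn_delegate.hpp>
#include <tensorflow/lite/interpreter.h>

#include <memory>
#include <vector>

using ArmnnDelegatePtr =
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>;

// Build the ArmNN delegate for the given backends and attach it to an
// existing interpreter. The returned delegate must outlive the interpreter.
ArmnnDelegatePtr AttachArmnnDelegate(tflite::Interpreter& interpreter,
                                     const std::vector<armnn::BackendId>& backends)
{
    armnnDelegate::DelegateOptions delegateOptions(backends);
    ArmnnDelegatePtr theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                      armnnDelegate::TfLiteArmnnDelegateDelete);
    // Supported nodes (now including SOFTMAX and LOG_SOFTMAX) are replaced by
    // ArmNN-backed partitions; unsupported nodes fall back to the TfLite runtime.
    interpreter.ModifyGraphWithDelegate(theArmnnDelegate.get());
    return theArmnnDelegate;
}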
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 3c77dcf2f3..595784f37a 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -126,9 +126,11 @@ if(BUILD_UNIT_TESTS)
src/test/QuantizationTestHelper.hpp
src/test/ResizeTest.cpp
src/test/ResizeTestHelper.hpp
+ src/test/SoftmaxTest.cpp
+ src/test/SoftmaxTestHelper.hpp
+ src/test/TestUtils.hpp
src/test/TransposeTest.cpp
- src/test/TransposeTestHelper.hpp
- src/test/TestUtils.hpp)
+ src/test/TransposeTestHelper.hpp)
add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
target_include_directories(DelegateUnitTests PRIVATE third-party)
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index ddadbc73c8..0de8e1438c 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <armnn/utility/IgnoreUnused.hpp>
+#include "DelegateUtils.hpp"
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
@@ -15,19 +15,133 @@
namespace armnnDelegate
{
+TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::SoftmaxDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsSoftmaxSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputInfo,
+ outputTensorInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::LogSoftmaxDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsLogSoftmaxSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputInfo,
+ outputTensorInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
int nodeIndex,
int32_t softmaxOperatorCode)
{
- armnn::IgnoreUnused(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- softmaxOperatorCode);
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ if (!delegateData.m_Network)
+ {
+ switch(softmaxOperatorCode)
+ {
+ case kTfLiteBuiltinSoftmax:
+ {
+ armnn::SoftmaxDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
+ descriptor.m_Beta = params->beta;
+ return ValidateSoftmaxOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ }
+ case kTfLiteBuiltinLogSoftmax:
+ {
+ armnn::LogSoftmaxDescriptor descriptor;
+ return ValidateLogSoftmaxOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ }
+ default:
+ return kTfLiteError;
+ }
+ }
+
+ armnn::IConnectableLayer* softmaxLayer = nullptr;
+
+ switch(softmaxOperatorCode)
+ {
+ case kTfLiteBuiltinSoftmax:
+ {
+ armnn::SoftmaxDescriptor descriptor;
+ auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
+ descriptor.m_Beta = params->beta;
+ softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+ break;
+ }
+ case kTfLiteBuiltinLogSoftmax:
+ {
+ armnn::LogSoftmaxDescriptor descriptor;
+ softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+ ARMNN_ASSERT(softmaxLayer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
- return kTfLiteError;
+ // Connect
+ return Connect(softmaxLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
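VisitSoftmaxOperator serves two phases: while delegateData.m_Network is null the delegate is only probing backend support (the Validate* helpers route through FORWARD_LAYER_SUPPORT_FUNC), and once a network exists the matching layer is added and connected. Outside the delegate, the network-building half reduces to plain ArmNN API calls. A minimal sketch, with an illustrative 2x5 float shape and beta; note that armnn::LogSoftmaxDescriptor is an alias of SoftmaxDescriptor, which is why the log-softmax case above sets no extra fields:

#include <armnn/ArmNN.hpp>

// Sketch: the network that the kTfLiteBuiltinSoftmax branch effectively
// builds, written against the plain ArmNN API.
armnn::INetworkPtr BuildSoftmaxNetwork(float beta)
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::SoftmaxDescriptor descriptor;
    descriptor.m_Beta = beta;   // mirrors params->beta above

    armnn::IConnectableLayer* input   = network->AddInputLayer(0);
    armnn::IConnectableLayer* softmax = network->AddSoftmaxLayer(descriptor);
    armnn::IConnectableLayer* output  = network->AddOutputLayer(0);

    // Softmax preserves the input shape, so one TensorInfo serves both slots.
    armnn::TensorInfo info(armnn::TensorShape({ 2, 5 }), armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    softmax->GetOutputSlot(0).SetTensorInfo(info);

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}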
diff --git a/delegate/src/test/SoftmaxTest.cpp b/delegate/src/test/SoftmaxTest.cpp
new file mode 100644
index 0000000000..3aacfe0a04
--- /dev/null
+++ b/delegate/src/test/SoftmaxTest.cpp
@@ -0,0 +1,129 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SoftmaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+/// Convenience function to run softmax and log-softmax test cases
+/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
+/// \param backends armnn backends to target
+/// \param beta multiplicative parameter to the softmax function
+/// \param expectedOutput expected result of applying the operator to the fixed test input
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
+                     std::vector<armnn::BackendId> backends,
+                     float beta,
+                     std::vector<float> expectedOutput)
+{
+ std::vector<float> input = {
+ 1.0, 2.5, 3.0, 4.5, 5.0,
+ -1.0, -2.5, -3.0, -4.5, -5.0};
+ std::vector<int32_t> shape = {2, 5};
+
+ SoftmaxTest(operatorCode,
+ tflite::TensorType_FLOAT32,
+ backends,
+ shape,
+ input,
+ expectedOutput,
+ beta);
+}
+
+TEST_SUITE ("Softmax_GpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+ 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ std::vector<float> expectedOutput = {0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092, 0.352414012,
+ 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ std::vector<float> expectedOutput =
+ {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+ -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+ SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_GpuAccTests")
+
+TEST_SUITE ("Softmax_CpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+ 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<float> expectedOutput = {
+ 0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+ 0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<float> expectedOutput =
+ {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+ -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+ SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuAccTests")
+
+TEST_SUITE ("Softmax_CpuRefTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<float> expectedOutput = {
+ 0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+ 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<float> expectedOutput = {
+ 0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+ 0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<float> expectedOutput =
+ {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+ -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+ SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuRefTests")
+} // namespace armnnDelegate
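The expected vectors hard-coded above follow directly from the definition: per row, softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j), and log-softmax is its natural logarithm with beta fixed at 1 (the beta argument is ignored on the log-softmax path, which is why those cases pass 0). A standalone sketch, not part of the patch, to regenerate the numbers:

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    const std::vector<std::vector<double>> rows = {
        {  1.0,  2.5,  3.0,  4.5,  5.0 },
        { -1.0, -2.5, -3.0, -4.5, -5.0 }
    };
    const double beta = 1.0;    // 0.3 reproduces the "Different_Beta" cases

    for (const auto& row : rows)
    {
        double sum = 0.0;
        for (double v : row) { sum += std::exp(beta * v); }
        for (double v : row)
        {
            const double softmax = std::exp(beta * v) / sum;
            // e.g. first value of row one: 0.00994190..., log: -4.61099...
            std::printf("softmax: %.9g  log-softmax: %.9g\n",
                        softmax, std::log(softmax));
        }
    }
    return 0;
}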
diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
new file mode 100644
index 0000000000..0474561a93
--- /dev/null
+++ b/delegate/src/test/SoftmaxTestHelper.hpp
@@ -0,0 +1,170 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+#include <armnnUtils/FloatingPointComparison.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector <int32_t>& tensorShape,
+ float beta)
+{
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+ std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+ tensors[0] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+ tensorShape.size()),
+ tensorType,
+ 0);
+ tensors[1] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+ tensorShape.size()),
+ tensorType,
+ 0);
+
+ const std::vector<int32_t> operatorInputs({0});
+ const std::vector<int32_t> operatorOutputs({1});
+
+ flatbuffers::Offset<Operator> softmaxOperator;
+ flatbuffers::Offset<flatbuffers::String> modelDescription;
+ flatbuffers::Offset<OperatorCode> operatorCode;
+
+ switch (softmaxOperatorCode)
+ {
+ case tflite::BuiltinOperator_SOFTMAX:
+ softmaxOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ BuiltinOptions_SoftmaxOptions,
+ CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
+ modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
+ operatorCode = CreateOperatorCode(flatBufferBuilder,
+ tflite::BuiltinOperator_SOFTMAX);
+ break;
+ case tflite::BuiltinOperator_LOG_SOFTMAX:
+ softmaxOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ BuiltinOptions_LogSoftmaxOptions,
+ CreateLogSoftmaxOptions(flatBufferBuilder).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
+ operatorCode = CreateOperatorCode(flatBufferBuilder,
+ tflite::BuiltinOperator_LOG_SOFTMAX);
+ break;
+ default:
+ break;
+ }
+ const std::vector<int32_t> subgraphInputs({0});
+ const std::vector<int32_t> subgraphOutputs({1});
+ flatbuffers::Offset<SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&softmaxOperator, 1));
+ flatbuffers::Offset<Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+ flatBufferBuilder.Finish(flatbufferModel);
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
+ tflite::TensorType tensorType,
+ std::vector<armnn::BackendId>& backends,
+ std::vector<int32_t>& shape,
+ std::vector<float>& inputValues,
+ std::vector<float>& expectedOutputValues,
+ float beta = 0)
+{
+ using namespace tflite;
+ std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
+ tensorType,
+ shape,
+ beta);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Set input data
+ auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+ auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ tfLiteInterpreterInputData[i] = inputValues[i];
+ }
+
+ auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+ auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ armnnDelegateInputData[i] = inputValues[i];
+ }
+    // Run inference on both interpreters
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ // Compare output data
+ auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < expectedOutputValues.size(); ++i)
+ {
+ CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 1e-5));
+ CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
+ armnnDelegateOutputData[i], 1e-5));
+ }
+}
+
+} // anonymous namespace
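The helper can also be driven directly from any doctest case, without the SoftmaxTestCase wrapper in SoftmaxTest.cpp. A minimal sketch with a hypothetical test name, and an input chosen so the expected softmax is exactly uniform:

TEST_CASE ("Softmax_Uniform_CpuRef_Sketch")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> shape = { 1, 2 };
    std::vector<float> input = { 0.0f, 0.0f };
    // Equal logits give a uniform distribution: exp(0) / (exp(0) + exp(0)).
    std::vector<float> expected = { 0.5f, 0.5f };

    SoftmaxTest(tflite::BuiltinOperator_SOFTMAX,
                tflite::TensorType_FLOAT32,
                backends,
                shape,
                input,
                expected,
                1.0f);
}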