author    Keith Davis <keith.davis@arm.com>    2021-04-08 11:47:23 +0100
committer KeithARM <keith.davis@arm.com>       2021-04-09 10:41:09 +0000
commit    7c67fabc86b6647855beebac9f6cfe92341357cb (patch)
tree      f7001e3422918898cc0c181beeee3917088cfa12 /delegate
parent    015b3f025d1f1ccb75d3f437bee8ed8dcfee302b (diff)
download  armnn-7c67fabc86b6647855beebac9f6cfe92341357cb.tar.gz
IVGCVSW-5803 Delegate Unit Tests Failure on Android: Normalization & Softmax
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I2873f8563cc11da550d460b04e5175372489a564
Diffstat (limited to 'delegate')
-rw-r--r--  delegate/CMakeLists.txt                             |  30
-rw-r--r--  delegate/src/test/NeonDelegateTests_NDK_Issue.cpp   |  63
-rw-r--r--  delegate/src/test/NormalizationTest.cpp             |  94
-rw-r--r--  delegate/src/test/NormalizationTestHelper.hpp       |  81
-rw-r--r--  delegate/src/test/SoftmaxTest.cpp                   |  52
-rw-r--r--  delegate/src/test/SoftmaxTestHelper.hpp             |  22
6 files changed, 196 insertions(+), 146 deletions(-)
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 5dbe83e014..d72089ca85 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -170,6 +170,36 @@ if(BUILD_UNIT_TESTS)
src/test/TransposeTest.cpp
src/test/TransposeTestHelper.hpp)
+ # There's a known Android NDK bug which causes a subset of the Neon delegate tests to
+ # fail. Those tests live in NeonDelegateTests_NDK_Issue.cpp and are excluded when we're
+ # doing a debug build with an NDK version older than r21.
+ # https://github.com/android/ndk/issues/1135
+
+ # Default to always including these tests.
+ set(INCLUDE_NDK_BUG_TESTS "ON")
+ # Reconsider if we are in a debug build.
+ string( TOLOWER ${CMAKE_BUILD_TYPE} BUILD_TYPE_LOWERCASE )
+ if ( NOT BUILD_TYPE_LOWERCASE STREQUAL "release" )
+ message("CMAKE:: BUILD TYPE IS ${CMAKE_BUILD_TYPE}")
+ # And NDK_VERSION has been set.
+ if ( DEFINED NDK_VERSION )
+ message("CMAKE:: NDK DEFINED")
+ # And the version is less than r21.
+ if ( ${NDK_VERSION} STRLESS "r21" )
+ message("CMAKE:: BUG TESTS OFF")
+ set(INCLUDE_NDK_BUG_TESTS "OFF")
+ endif()
+ endif()
+ endif()
+
+ if ( INCLUDE_NDK_BUG_TESTS STREQUAL "ON" )
+ list(APPEND armnnDelegate_unittest_sources
+ src/test/NeonDelegateTests_NDK_Issue.cpp
+ )
+ else()
+
+ endif()
+
add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
target_include_directories(DelegateUnitTests PRIVATE third-party)
diff --git a/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp b/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp
new file mode 100644
index 0000000000..a437a08a49
--- /dev/null
+++ b/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NormalizationTestHelper.hpp"
+#include "SoftmaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+// There's a known Android NDK bug which causes this subset of Neon delegate tests to
+// fail. These tests are excluded when we're doing a debug build with an NDK version
+// older than r21. The exclusion takes place in delegate/CMakeLists.txt.
+// https://github.com/android/ndk/issues/1135
+
+TEST_SUITE ("Softmax_CpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+ 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<float> expectedOutput = {
+ 0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+ 0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+ SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<float> expectedOutput =
+ {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+ -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+ SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuAccTests")
+
+TEST_SUITE("L2Normalization_CpuAccTests")
+{
+
+TEST_CASE ("L2NormalizationFp32Test_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ L2NormalizationTest(backends);
+}
+} // TEST_SUITE("L2NormalizationFp32Test_CpuAcc_Test")
+} \ No newline at end of file
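Note: the hard-coded expectedOutput vectors in the CpuAcc cases above are the ordinary softmax values of the shared 2x5 input { 1.0, 2.5, 3.0, 4.5, 5.0, -1.0, -2.5, -3.0, -4.5, -5.0 } defined by SoftmaxTestCase in SoftmaxTestHelper.hpp, with beta applied before the exponential. A minimal standalone sketch, not part of this commit, that reproduces the first row for beta = 1 and beta = 0.3:

// Standalone sketch (not part of this commit): recomputes the expected
// softmax outputs used by the CpuAcc test cases above.
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<float> ReferenceSoftmax(const std::vector<float>& row, float beta)
{
    std::vector<float> out(row.size());
    float sum = 0.0f;
    for (size_t i = 0; i < row.size(); ++i)
    {
        out[i] = std::exp(beta * row[i]);
        sum += out[i];
    }
    for (float& value : out)
    {
        value /= sum;
    }
    return out;
}

int main()
{
    // First row of the SoftmaxTestCase input.
    const std::vector<float> row { 1.0f, 2.5f, 3.0f, 4.5f, 5.0f };
    for (float value : ReferenceSoftmax(row, 1.0f))  // Softmax_Standard_Beta: beta = 1
    {
        std::printf("%.9f ", value);                 // ~0.009941905 0.044556525 0.073461294 ...
    }
    std::printf("\n");
    for (float value : ReferenceSoftmax(row, 0.3f))  // Softmax_Different_Beta: beta = 0.3
    {
        std::printf("%.9f ", value);                 // ~0.094623491 0.148399189 0.172415257 ...
    }
    std::printf("\n");
    return 0;
}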
diff --git a/delegate/src/test/NormalizationTest.cpp b/delegate/src/test/NormalizationTest.cpp
index 058394edb7..e33dcf056e 100644
--- a/delegate/src/test/NormalizationTest.cpp
+++ b/delegate/src/test/NormalizationTest.cpp
@@ -8,95 +8,12 @@
#include <armnn_delegate.hpp>
#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
#include <doctest/doctest.h>
namespace armnnDelegate
{
-void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 1, 1, 10 };
- std::vector<int32_t> outputShape { 1, 1, 1, 10 };
-
- std::vector<float> inputValues
- {
- 1.0f,
- 2.0f,
- 3.0f,
- 4.0f,
- 5.0f,
- 6.0f,
- 7.0f,
- 8.0f,
- 9.0f,
- 10.0f
- };
-
- const float approxInvL2Norm = 0.050964719f;
- std::vector<float> expectedOutputValues
- {
- 1.0f * approxInvL2Norm,
- 2.0f * approxInvL2Norm,
- 3.0f * approxInvL2Norm,
- 4.0f * approxInvL2Norm,
- 5.0f * approxInvL2Norm,
- 6.0f * approxInvL2Norm,
- 7.0f * approxInvL2Norm,
- 8.0f * approxInvL2Norm,
- 9.0f * approxInvL2Norm,
- 10.0f * approxInvL2Norm
- };
-
- NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
- int32_t radius,
- float bias,
- float alpha,
- float beta)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2, 1 };
- std::vector<int32_t> outputShape { 2, 2, 2, 1 };
-
- std::vector<float> inputValues
- {
- 1.0f, 2.0f,
- 3.0f, 4.0f,
- 5.0f, 6.0f,
- 7.0f, 8.0f
- };
-
- std::vector<float> expectedOutputValues
- {
- 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
- 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f
- };
-
- NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- radius,
- bias,
- alpha,
- beta);
-}
-
-
TEST_SUITE("L2Normalization_CpuRefTests")
{
@@ -108,17 +25,6 @@ TEST_CASE ("L2NormalizationFp32Test_CpuRef_Test")
} // TEST_SUITE("L2Normalization_CpuRefTests")
-TEST_SUITE("L2Normalization_CpuAccTests")
-{
-
-TEST_CASE ("L2NormalizationFp32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2NormalizationTest(backends);
-}
-
-} // TEST_SUITE("L2NormalizationFp32Test_CpuAcc_Test")
-
TEST_SUITE("L2Normalization_GpuAccTests")
{
diff --git a/delegate/src/test/NormalizationTestHelper.hpp b/delegate/src/test/NormalizationTestHelper.hpp
index bc969c248d..ebdfdc1a25 100644
--- a/delegate/src/test/NormalizationTestHelper.hpp
+++ b/delegate/src/test/NormalizationTestHelper.hpp
@@ -178,4 +178,85 @@ void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
}
+void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 1, 1, 10 };
+ std::vector<int32_t> outputShape { 1, 1, 1, 10 };
+
+ std::vector<float> inputValues
+ {
+ 1.0f,
+ 2.0f,
+ 3.0f,
+ 4.0f,
+ 5.0f,
+ 6.0f,
+ 7.0f,
+ 8.0f,
+ 9.0f,
+ 10.0f
+ };
+
+ const float approxInvL2Norm = 0.050964719f;
+ std::vector<float> expectedOutputValues
+ {
+ 1.0f * approxInvL2Norm,
+ 2.0f * approxInvL2Norm,
+ 3.0f * approxInvL2Norm,
+ 4.0f * approxInvL2Norm,
+ 5.0f * approxInvL2Norm,
+ 6.0f * approxInvL2Norm,
+ 7.0f * approxInvL2Norm,
+ 8.0f * approxInvL2Norm,
+ 9.0f * approxInvL2Norm,
+ 10.0f * approxInvL2Norm
+ };
+
+ NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues);
+}
+
+void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
+ int32_t radius,
+ float bias,
+ float alpha,
+ float beta)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 2, 2, 2, 1 };
+ std::vector<int32_t> outputShape { 2, 2, 2, 1 };
+
+ std::vector<float> inputValues
+ {
+ 1.0f, 2.0f,
+ 3.0f, 4.0f,
+ 5.0f, 6.0f,
+ 7.0f, 8.0f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
+ 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f
+ };
+
+ NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ radius,
+ bias,
+ alpha,
+ beta);
+}
+
} // anonymous namespace \ No newline at end of file
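Note: the constants in these helpers can be derived directly. In L2NormalizationTest, approxInvL2Norm is 1 / sqrt(1*1 + 2*2 + ... + 10*10) = 1 / sqrt(385) ≈ 0.050964719. The LocalResponseNormalizationTest expected values match x / (bias + alpha * x^2)^beta when the normalization window covers only the element itself (the input has a single channel) and bias = alpha = beta = 1; the radius/bias/alpha/beta values actually passed by the callers are not shown in this diff, so treat those parameters as an assumption. A minimal standalone sketch, not part of this commit:

// Standalone sketch (not part of this commit): shows where the hard-coded
// constants in the two normalization helpers above come from.
#include <cmath>
#include <cstdio>

int main()
{
    // L2NormalizationTest: approxInvL2Norm = 1 / sqrt(1^2 + 2^2 + ... + 10^2)
    float sumOfSquares = 0.0f;
    for (int i = 1; i <= 10; ++i)
    {
        sumOfSquares += static_cast<float>(i * i);
    }
    std::printf("invL2Norm = %.9f\n", 1.0f / std::sqrt(sumOfSquares)); // ~0.050964719

    // LocalResponseNormalizationTest: assumed parameters (not shown in this diff).
    const float bias  = 1.0f;
    const float alpha = 1.0f;
    const float beta  = 1.0f;
    for (int x = 1; x <= 8; ++x)
    {
        const float value = static_cast<float>(x);
        // With a single-element window the accumulated sum is just value^2.
        std::printf("%.9f ", value / std::pow(bias + alpha * value * value, beta));
    }
    std::printf("\n"); // 0.5 0.4 0.3 0.235294119 0.192307696 0.16216217 0.14 0.123076923
    return 0;
}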
diff --git a/delegate/src/test/SoftmaxTest.cpp b/delegate/src/test/SoftmaxTest.cpp
index 3aacfe0a04..3339c09918 100644
--- a/delegate/src/test/SoftmaxTest.cpp
+++ b/delegate/src/test/SoftmaxTest.cpp
@@ -14,28 +14,6 @@
namespace armnnDelegate
{
-
-/// Convenience function to run softmax and log-softmax test cases
-/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
-/// \param backends armnn backends to target
-/// \param beta multiplicative parameter to the softmax function
-/// \param expectedOutput to be checked against transformed input
-void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
- std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
- std::vector<float> input = {
- 1.0, 2.5, 3.0, 4.5, 5.0,
- -1.0, -2.5, -3.0, -4.5, -5.0};
- std::vector<int32_t> shape = {2, 5};
-
- SoftmaxTest(operatorCode,
- tflite::TensorType_FLOAT32,
- backends,
- shape,
- input,
- expectedOutput,
- beta);
-}
-
TEST_SUITE ("Softmax_GpuAccTests")
{
@@ -66,36 +44,6 @@ TEST_CASE ("Log_Softmax_GpuAcc_Test")
}
} // TEST_SUITE ("Softmax_GpuAccTests")
-TEST_SUITE ("Softmax_CpuAccTests")
-{
-
-TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
- 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
-}
-
-TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- std::vector<float> expectedOutput = {
- 0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
- 0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
-}
-
-TEST_CASE ("Log_Softmax_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- std::vector<float> expectedOutput =
- {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
- -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
- SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
-}
-} // TEST_SUITE ("Softmax_CpuAccTests")
-
TEST_SUITE ("Softmax_CpuRefTests")
{
diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
index b3086bb0cb..bd32c212e9 100644
--- a/delegate/src/test/SoftmaxTestHelper.hpp
+++ b/delegate/src/test/SoftmaxTestHelper.hpp
@@ -167,4 +167,26 @@ void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
}
}
+
+/// Convenience function to run softmax and log-softmax test cases
+/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
+/// \param backends armnn backends to target
+/// \param beta multiplicative parameter to the softmax function
+/// \param expectedOutput to be checked against transformed input
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
+ std::vector<armnn::BackendId> backends, float beta, std::vector<float> expectedOutput) {
+ std::vector<float> input = {
+ 1.0, 2.5, 3.0, 4.5, 5.0,
+ -1.0, -2.5, -3.0, -4.5, -5.0};
+ std::vector<int32_t> shape = {2, 5};
+
+ SoftmaxTest(operatorCode,
+ tflite::TensorType_FLOAT32,
+ backends,
+ shape,
+ input,
+ expectedOutput,
+ beta);
+}
+
} // anonymous namespace
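Note: the Log_Softmax test cases pass beta = 0 but expect plain log-softmax values, i.e. x - log(sum(exp(x))). This is consistent with TFLite's LOG_SOFTMAX operator, whose options carry no beta field, so the beta argument of SoftmaxTestCase only affects BuiltinOperator_SOFTMAX. A minimal standalone sketch, not part of this commit, reproducing the expected values for the first input row:

// Standalone sketch (not part of this commit): recomputes the Log_Softmax
// expected outputs; beta plays no part in this calculation.
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    // First row of the SoftmaxTestCase input.
    const std::vector<float> row { 1.0f, 2.5f, 3.0f, 4.5f, 5.0f };
    float sum = 0.0f;
    for (float value : row)
    {
        sum += std::exp(value);
    }
    const float logSum = std::log(sum);
    for (float value : row)
    {
        std::printf("%.9f ", value - logSum); // ~-4.610997 -3.110997 -2.610997 -1.110997 -0.610997
    }
    std::printf("\n");
    return 0;
}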