author     Jan Eilers <jan.eilers@arm.com>    2020-03-11 12:52:46 +0000
committer  Jim Flynn <jim.flynn@arm.com>      2020-03-18 17:50:48 +0000
commit     bca73e1c82438f160364a113793d0a2195c665ac (patch)
tree       0ebb17f7ce546af5bf1641e7a946d424f0f87b0a
parent     3184c907b2420e6c66485529f336251b2b62aecf (diff)
download   armnn-bca73e1c82438f160364a113793d0a2195c665ac.tar.gz
IVGCVSW-4444 Adding Elu end to end test
* implemented activation layer end to end test
* adds support for different tolerances in layer tests
* added tests for Elu (Ref, Cl, Neon)

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I81e28cfb4456e815bae2fb31f5c345134ff2432f
-rw-r--r--  include/armnn/Descriptors.hpp                                     |  12
-rw-r--r--  src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp  | 143
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt                   |   1
-rw-r--r--  src/backends/backendsCommon/test/EndToEndTestImpl.hpp             |   8
-rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp                          |  11
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp                      |  11
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp                  |  31
7 files changed, 211 insertions, 6 deletions
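
The new Elu coverage is picked up by each backend's end-to-end test suite in the same way. Below is a minimal sketch of that wiring, assuming a Boost.Test suite and a defaultBackends list like the ones in the Cl/Neon/Ref test files further down; the suite name and the CpuRef backend choice here are placeholders for the sketch, not part of the patch.

#include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>

#include <boost/test/unit_test.hpp>

#include <vector>

BOOST_AUTO_TEST_SUITE(ExampleBackendEndToEnd)   // hypothetical suite name

// Hypothetical backend selection for this sketch; each real suite defines its own.
std::vector<armnn::BackendId> defaultBackends = { armnn::Compute::CpuRef };

BOOST_AUTO_TEST_CASE(EluEndToEndTestFloat32)
{
    // Builds the single-layer Elu network, runs it on the listed backends and
    // compares the output against the float reference data within the Elu tolerance.
    EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_SUITE_END()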
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index f1b29cc6c7..57917261d4 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -25,15 +25,23 @@ struct ActivationDescriptor
, m_B(0)
{}
+ ActivationDescriptor(armnn::ActivationFunction activation,
+ float a = 0,
+ float b = 0)
+ : m_Function(activation)
+ , m_A(a)
+ , m_B(b)
+ {}
+
bool operator ==(const ActivationDescriptor &rhs) const
{
return m_Function == rhs.m_Function && m_A == rhs.m_B && m_B == rhs.m_B;
}
/// @brief The activation function to use
- /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
+ /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
ActivationFunction m_Function;
- /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
+ /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
float m_A;
/// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
float m_B;
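
The added constructor lets an ActivationDescriptor be built in a single expression instead of assigning m_Function, m_A and m_B individually. A small sketch of the two equivalent spellings, using Elu with alpha = 1.0f as in the new end-to-end test (the variable names are illustrative only):

// Before: field-by-field setup using the existing default constructor.
armnn::ActivationDescriptor eluDescLong;
eluDescLong.m_Function = armnn::ActivationFunction::Elu;
eluDescLong.m_A        = 1.0f;   // alpha
eluDescLong.m_B        = 0.0f;   // unused by Elu

// After: the convenience constructor added by this patch.
armnn::ActivationDescriptor eluDescShort(armnn::ActivationFunction::Elu, 1.0f);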
diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..23884b0c20
--- /dev/null
+++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
@@ -0,0 +1,143 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+
+#include <armnn/INetwork.hpp>
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/test/CommonTestUtils.hpp>
+#include <ResolveType.hpp>
+
+#include <boost/test/unit_test_log.hpp>
+
+namespace
+{
+
+/** Defines the acceptable tolerance of ActivationFunction-DataType combinations.
+ *
+ * @param activationFunction The activation function used
+ * @param dataType Data type used
+ *
+ * @return Tolerance depending on the activation function and data type
+ */
+float GetActivationTolerance(const armnn::ActivationFunction& activationFunction, DataType dataType)
+{
+ constexpr float defaultTolerance = 1e-6f;
+
+ switch (activationFunction)
+ {
+ // The following values are taken from ArmComputeLibrary/tests/validation/CL/ActivationLayer.cpp
+ case ActivationFunction::Elu:
+ return (dataType == DataType::Float16 ? 0.01f : 0.00001f);
+ default:
+ return defaultTolerance;
+ }
+}
+
+/** Creates a network with one layer of the activation function specified in the activation descriptor.
+ *
+ * @param inputInfo Tensor info of inputs
+ * @param outputInfo Tensor info of outputs
+ * @param descriptor Activation descriptor
+ *
+ * @return INetworkPtr A pointer to the created network
+ */
+armnn::INetworkPtr CreateActivationNetwork(const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::ActivationDescriptor& descriptor)
+{
+ using namespace armnn;
+
+ char const* ActivationName = GetActivationFunctionAsCString(descriptor.m_Function);
+
+ INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input = net->AddInputLayer(0, "input");
+ IConnectableLayer* activation = net->AddActivationLayer(descriptor, ActivationName);
+ IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+ Connect(input, activation, inputInfo, 0, 0);
+ Connect(activation, output, outputInfo, 0, 0);
+
+ return net;
+}
+
+/** Specifies the implementation of end to end tests for activation functions.
+ *
+ * - Converts input data and expected-output data to the data type that is desired for the test (ArmnnType)
+ * - Creates a network with one layer of the activation function specified in the activation descriptor.
+ * - Executes the network on specified backends and compares results to expected output values
+ *
+ * @tparam ArmnnType The armnn data type for the input and expected-output data
+ * @param backends Backends to run test on
+ * @param floatInputData Input data given as vector of float
+ * @param floatExpectedOutputData Expected output data given as vector of float
+ * @param inputInfo Tensor info of inputs
+ * @param outputInfo Tensor info of outputs
+ * @param descriptor Activation descriptor
+ */
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ActivationEndToEndImpl(const std::vector<armnn::BackendId>& backends,
+ const std::vector<float>& floatInputData,
+ const std::vector<float>& floatExpectedOutputData,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::ActivationDescriptor& descriptor)
+{
+ using namespace armnn;
+
+ // Selectively quantizes/transforms float values to the needed data type
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>( floatInputData,
+ inputInfo.GetQuantizationScale(),
+ inputInfo.GetQuantizationOffset());
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>( floatExpectedOutputData,
+ outputInfo.GetQuantizationScale(),
+ outputInfo.GetQuantizationOffset());
+
+ INetworkPtr net = CreateActivationNetwork(inputInfo, outputInfo, descriptor);
+
+ std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
+ std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
+
+ float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
+ inputTensorData,
+ expectedOutputTensorData,
+ backends,
+ tolerance);
+}
+
+/** Executes an end to end test for Elu activation with specific input and expected-output data
+ *
+ * @tparam ArmnnType The armnn data type for the input and expected-output data
+ * @param backends The backends on which to run the test
+ */
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void EluEndToEndTest(const std::vector<BackendId>& backends)
+{
+ std::vector<float> floatInputData{ -2.0f, -1.0f, -0.0f, 0.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f };
+
+ std::vector<float> floatExpectedOutputData{ -0.86466471676f, -0.63212055882f, -0.0f, 0.0f,
+ 1.0f , 2.0f, 3.0f, 4.0f };
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+ armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+
+ armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0);
+
+ ActivationEndToEndImpl<ArmnnType>(backends,
+ floatInputData,
+ floatExpectedOutputData,
+ inputInfo,
+ outputInfo,
+ descriptor);
+}
+
+} // anonymous namespace
\ No newline at end of file
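
The expected output values in EluEndToEndTest follow the usual Elu definition, elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise, with alpha = 1: exp(-2) - 1 ≈ -0.86466472 and exp(-1) - 1 ≈ -0.63212056. A standalone sketch (not ArmNN's backend implementation) that reproduces those reference values:

#include <cmath>
#include <cstdio>

// Scalar Elu reference with alpha defaulted to 1.0f, matching the descriptor above.
float EluRef(float x, float alpha = 1.0f)
{
    return x > 0.0f ? x : alpha * std::expm1(x);   // expm1(x) == exp(x) - 1
}

int main()
{
    const float inputs[] = { -2.0f, -1.0f, -0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 4.0f };
    for (float x : inputs)
    {
        // Prints -0.864665, -0.632121, -0.000000, 0.000000, 1.000000, ...
        std::printf("elu(%f) = %f\n", x, EluRef(x));
    }
    return 0;
}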
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index dc8031f6b4..ea214de771 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -21,6 +21,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
DynamicBackendTests.hpp
ElementwiseUnaryEndToEndTestImpl.hpp
EndToEndTestImpl.hpp
+ ActivationEndToEndTestImpl.hpp
GatherEndToEndTestImpl.hpp
InstanceNormalizationEndToEndTestImpl.cpp
InstanceNormalizationEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 4221f626da..a4d0d505f8 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -107,7 +107,7 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
// Utility template for comparing tensor elements
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
-bool Compare(T a, T b)
+bool Compare(T a, T b, float tolerance = 0.000001f)
{
if (ArmnnType == DataType::Boolean)
{
@@ -119,7 +119,6 @@ bool Compare(T a, T b)
// NOTE: All other types can be cast to float and compared with
// a certain level of tolerance
- constexpr float tolerance = 0.000001f;
return std::fabs(static_cast<float>(a) - static_cast<float>(b)) <= tolerance;
}
@@ -143,7 +142,8 @@ template<DataType ArmnnIType, DataType ArmnnOType,
void EndToEndLayerTestImpl(INetworkPtr network,
const std::map<int, std::vector<TInput>>& inputTensorData,
const std::map<int, std::vector<TOutput>>& expectedOutputData,
- std::vector<BackendId> backends)
+ std::vector<BackendId> backends,
+ float tolerance = 0.000001f)
{
// Create runtime in which test will run
IRuntime::CreationOptions options;
@@ -184,7 +184,7 @@ void EndToEndLayerTestImpl(INetworkPtr network,
std::vector<TOutput> out = outputStorage.at(it.first);
for (unsigned int i = 0; i < out.size(); ++i)
{
- BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i]) == true);
+ BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true);
}
}
}
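
Compare and EndToEndLayerTestImpl now take an absolute tolerance, defaulting to the previous hard-coded 1e-6, which is what allows the Float16 Elu test to use the looser 0.01 bound from GetActivationTolerance. A standalone sketch of the comparison, with an illustrative (hypothetical) Float16-rounded value:

#include <cmath>

// Standalone version of the tolerance-aware element comparison; the default
// matches the old hard-coded value so existing tests keep their behaviour.
bool CompareWithTolerance(float a, float b, float tolerance = 0.000001f)
{
    return std::fabs(a - b) <= tolerance;
}

// A Float16-rounded Elu result (illustrative value) against the float reference:
// it fails the 1e-6 default but passes the 0.01f Float16 tolerance.
// CompareWithTolerance(-0.86475f, -0.86466471676f)         -> false
// CompareWithTolerance(-0.86475f, -0.86466471676f, 0.01f)  -> true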
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 05f9538d6f..8b4fbbbd1b 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -5,6 +5,7 @@
#include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>
#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
@@ -131,6 +132,16 @@ BOOST_AUTO_TEST_CASE(ClStridedSliceInvalidSliceEndToEndTest)
StridedSliceInvalidSliceEndToEndTest(defaultBackends);
}
+BOOST_AUTO_TEST_CASE(ClEluEndToEndTestFloat32)
+{
+ EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClEluEndToEndTestFloat16)
+{
+ EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+}
+
BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
{
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 081b8af60a..b2c43e12c3 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -5,6 +5,7 @@
#include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>
#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
@@ -207,6 +208,16 @@ BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}
+BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat32)
+{
+ EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat16)
+{
+ EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+}
+
BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
{
PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index bdda12f392..83f947ce09 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -5,6 +5,7 @@
#include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>
#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
#include <backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
@@ -550,6 +551,36 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
+BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat32)
+{
+ EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat16)
+{
+ EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefEluEndToEndTestBFloat16)
+{
+ EluEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmS8)
+{
+ EluEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmU8)
+{
+ EluEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQSymmS16)
+{
+ EluEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
+}
+
BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
{
GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
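
For the quantised Ref variants (QAsymmS8, QAsymmU8, QSymmS16), the float reference data is first pushed through QuantizedVector with the scale 1.0 / offset 0 set in EluEndToEndTest. The sketch below shows the same round-and-clamp arithmetic for the signed 8-bit case; it is not ArmNN's QuantizedVector itself, and the q = round(x / scale) + offset form is an assumption stated here rather than taken from the patch.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Round-and-clamp quantisation for a signed 8-bit type, assuming q = round(x / scale) + offset.
int8_t QuantizeS8(float value, float scale = 1.0f, int32_t offset = 0)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    q = std::min(127, std::max(-128, q));   // clamp to the signed 8-bit range
    return static_cast<int8_t>(q);
}

// With scale 1 and offset 0 the expected Elu outputs
// { -0.8646..., -0.6321..., -0.0, 0.0, 1.0, 2.0, 3.0, 4.0 }
// quantise to { -1, -1, 0, 0, 1, 2, 3, 4 }.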