From bca73e1c82438f160364a113793d0a2195c665ac Mon Sep 17 00:00:00 2001
From: Jan Eilers
Date: Wed, 11 Mar 2020 12:52:46 +0000
Subject: IVGCVSW-4444 Adding Elu end to end test

* implemented activation layer end to end test
* adds support for different tolerances in layer tests
* added tests for Elu (Ref, Cl, Neon)

Signed-off-by: Jan Eilers
Change-Id: I81e28cfb4456e815bae2fb31f5c345134ff2432f
---
 .../test/ActivationEndToEndTestImpl.hpp          | 143 +++++++++++++++++++++
 src/backends/backendsCommon/test/CMakeLists.txt  |   1 +
 .../backendsCommon/test/EndToEndTestImpl.hpp     |   8 +-
 3 files changed, 148 insertions(+), 4 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp

diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..23884b0c20
--- /dev/null
+++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
@@ -0,0 +1,143 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+
+#include
+#include
+#include
+#include
+
+#include
+
+namespace
+{
+
+/** Defines the acceptable tolerance of ActivationFunction-DataType combinations.
+ *
+ * @param activationFunction The activation function used
+ * @param dataType Data type used
+ *
+ * @return Tolerance depending on the activation function and data type
+ */
+float GetActivationTolerance(const armnn::ActivationFunction& activationFunction, DataType dataType)
+{
+    constexpr float defaultTolerance = 1e-6f;
+
+    switch (activationFunction)
+    {
+        // The following values are taken from ArmComputeLibrary/tests/validation/CL/ActivationLayer.cpp
+        case ActivationFunction::Elu:
+            return (dataType == DataType::Float16 ? 0.01f : 0.00001f);
+        default:
+            return defaultTolerance;
+    }
+}
+
+/** Creates a network with one layer of the activation function specified in the activation descriptor.
+ *
+ * @param inputInfo Tensor info of inputs
+ * @param outputInfo Tensor info of outputs
+ * @param descriptor Activation descriptor
+ *
+ * @return INetworkPtr A pointer to the created network
+ */
+armnn::INetworkPtr CreateActivationNetwork(const armnn::TensorInfo& inputInfo,
+                                           const armnn::TensorInfo& outputInfo,
+                                           const armnn::ActivationDescriptor& descriptor)
+{
+    using namespace armnn;
+
+    char const* ActivationName = GetActivationFunctionAsCString(descriptor.m_Function);
+
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input = net->AddInputLayer(0, "input");
+    IConnectableLayer* prelu = net->AddActivationLayer(descriptor, ActivationName);
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    Connect(input, prelu, inputInfo, 0, 0);
+    Connect(prelu, output, outputInfo, 0, 0);
+
+    return net;
+}
+
+/** Specifies the implementation of end to end tests for activation functions.
+ *
+ * - Converts input data and expected-output data to the data type that is desired for the test (ArmnnType)
+ * - Creates a network with one layer of the activation function specified in the activation descriptor.
+ * - Executes the network on specified backends and compares results to expected output values
+ *
+ * @tparam ArmnnType The armnn data type for the input and expected-output data
+ * @param backends Backends to run test on
+ * @param floatInputData Input data given as vector of float
+ * @param floatExpectedOutputData Expected output data given as vector of float
+ * @param inputInfo Tensor info of inputs
+ * @param outputInfo Tensor info of outputs
+ * @param descriptor Activation descriptor
+ */
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ActivationEndToEndImpl(const std::vector<armnn::BackendId>& backends,
+                            const std::vector<float>& floatInputData,
+                            const std::vector<float>& floatExpectedOutputData,
+                            const armnn::TensorInfo& inputInfo,
+                            const armnn::TensorInfo& outputInfo,
+                            const armnn::ActivationDescriptor& descriptor)
+{
+    using namespace armnn;
+
+    // Selectively quantizes/transforms float values to the needed data type
+    std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData,
+                                                              inputInfo.GetQuantizationScale(),
+                                                              inputInfo.GetQuantizationOffset());
+    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData,
+                                                                       outputInfo.GetQuantizationScale(),
+                                                                       outputInfo.GetQuantizationOffset());
+
+    INetworkPtr net = CreateActivationNetwork(inputInfo, outputInfo, descriptor);
+
+    std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
+    std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
+
+    float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
+                                                inputTensorData,
+                                                expectedOutputTensorData,
+                                                backends,
+                                                tolerance);
+}
+
+/** Executes an end to end test for Elu activation with specific input and expected-output data
+ *
+ * @tparam ArmnnType The armnn data type for the input and expected-output data
+ * @param backends The backends on which to run the test
+ */
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void EluEndToEndTest(const std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> floatInputData{ -2.0f, -1.0f, -0.0f, 0.0f,
+                                        1.0f,  2.0f,  3.0f, 4.0f };
+
+    std::vector<float> floatExpectedOutputData{ -0.86466471676f, -0.63212055882f, -0.0f, 0.0f,
+                                                 1.0f,            2.0f,            3.0f, 4.0f };
+
+    float qScale = 1.0f;
+    int32_t qOffset = 0;
+    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+
+    armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0);
+
+    ActivationEndToEndImpl<ArmnnType>(backends,
+                                      floatInputData,
+                                      floatExpectedOutputData,
+                                      inputInfo,
+                                      outputInfo,
+                                      descriptor);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index dc8031f6b4..ea214de771 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -21,6 +21,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     DynamicBackendTests.hpp
     ElementwiseUnaryEndToEndTestImpl.hpp
     EndToEndTestImpl.hpp
+    ActivationEndToEndTestImpl.hpp
     GatherEndToEndTestImpl.hpp
     InstanceNormalizationEndToEndTestImpl.cpp
     InstanceNormalizationEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 4221f626da..a4d0d505f8 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -107,7 +107,7 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
 
 // Utility template for comparing tensor elements
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
-bool Compare(T a, T b)
+bool Compare(T a, T b, float tolerance = 0.000001f)
 {
     if (ArmnnType == DataType::Boolean)
     {
@@ -119,7 +119,6 @@ bool Compare(T a, T b)
 
     // NOTE: All other types can be cast to float and compared with
     // a certain level of tolerance
-    constexpr float tolerance = 0.000001f;
     return std::fabs(static_cast<float>(a) - static_cast<float>(b)) <= tolerance;
 }
 
@@ -143,7 +142,8 @@ template<DataType ArmnnIType, DataType ArmnnOType,
 void EndToEndLayerTestImpl(INetworkPtr network,
                            const std::map<int, std::vector<TInput>>& inputTensorData,
                            const std::map<int, std::vector<TOutput>>& expectedOutputData,
-                           std::vector<BackendId> backends)
+                           std::vector<BackendId> backends,
+                           float tolerance = 0.000001f)
 {
     // Create runtime in which test will run
     IRuntime::CreationOptions options;
@@ -184,7 +184,7 @@ void EndToEndLayerTestImpl(INetworkPtr network,
         std::vector<TOutput> out = outputStorage.at(it.first);
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i]) == true);
+            BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true);
         }
     }
 }
-- 
cgit v1.2.1
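
Note on the expected values used in EluEndToEndTest: with the descriptor constructed as
ActivationDescriptor(ActivationFunction::Elu, 1.0), the layer computes ELU(x) = x for x > 0 and
alpha * (exp(x) - 1) for x <= 0, here with alpha = 1.0. So ELU(-1) = exp(-1) - 1 ≈ -0.63212055882
and ELU(-2) = exp(-2) - 1 ≈ -0.86466471676, which is where the first two entries of
floatExpectedOutputData come from; non-negative inputs pass through unchanged.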
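
The per-backend registrations mentioned in the commit message (Ref, Cl, Neon) live outside
src/backends/backendsCommon and are therefore not shown in this diff. As a rough sketch only, a
reference-backend registration could look like the snippet below; the include path, suite name and
test-case name are illustrative assumptions, not taken from this patch:

    #include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>

    #include <boost/test/unit_test.hpp>

    #include <vector>

    BOOST_AUTO_TEST_SUITE(RefEndToEnd)

    // Hypothetical test case: runs the Elu end to end test on the reference backend in Float32.
    BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat32)
    {
        std::vector<armnn::BackendId> defaultBackends = { armnn::Compute::CpuRef };
        EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
    }

    BOOST_AUTO_TEST_SUITE_END()

The same call with a different DataType template argument (e.g. Float16 or QAsymmU8) exercises the
quantization path and the per-type tolerances added in GetActivationTolerance.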