ArmNN
 21.11
ActivationEndToEndTestImpl.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "EndToEndTestImpl.hpp"
8 
9 #include <armnn/INetwork.hpp>
10 #include <armnn/TypesUtils.hpp>
11 
12 #include <ResolveType.hpp>

#include <map>
#include <utility>
#include <vector>
13 
14 namespace
15 {
16 
17 /** Defines the acceptable tolerance of ActivationFunction-DataType combinations.
18  *
19  * @param activationFunction The activation function used
20  * @param dataType Data type used
21  *
22  * @return Tolerance depending on the activation function and data type
23  */
24 float GetActivationTolerance(const armnn::ActivationFunction& activationFunction, DataType dataType)
25 {
26  constexpr float defaultTolerance = 1e-6f;
27 
28  switch (activationFunction)
29  {
30  // The following values are taken from ArmComputeLibrary/tests/validation/CL/ActivationLayer.cpp
31  case ActivationFunction::Elu:
32  return (dataType == DataType::Float16 ? 0.01f : 0.00001f);
33  case ActivationFunction::HardSwish:
34  return (dataType == DataType::Float16 ? 0.01f : defaultTolerance);
35  default:
36  return defaultTolerance;
37  }
38 }
39 
40 /** Creates a network with one layer of the activation function specified in the activation descriptor.
41  *
42  * @param inputInfo Tensor info of inputs
43  * @param outputInfo Tensor info of outputs
44  * @param descriptor Activation descriptor
45  *
46  * @return INetworkPtr A pointer to the created network
47  */
48 armnn::INetworkPtr CreateActivationNetwork(const armnn::TensorInfo& inputInfo,
49  const armnn::TensorInfo& outputInfo,
50  const armnn::ActivationDescriptor& descriptor)
51 {
52  using namespace armnn;
53 
54  char const* ActivationName = GetActivationFunctionAsCString(descriptor.m_Function);
55 
57 
58  IConnectableLayer* input = net->AddInputLayer(0, "input");
59  IConnectableLayer* prelu = net->AddActivationLayer(descriptor, ActivationName);
60  IConnectableLayer* output = net->AddOutputLayer(0, "output");
61 
62  Connect(input, prelu, inputInfo, 0, 0);
63  Connect(prelu, output, outputInfo, 0, 0);
64 
65  return net;
66 }
67 
68 /** Specifies the implementation of end to end tests for activation functions.
69  *
70  * - Converts input data and expected-output data to the data type that is desired for the test (ArmnnType)
71  * - Creates a network with one layer of the activation function specified in the activation descriptor.
72  * - Executes the network on specified backends and compares results to expected output values
73  *
74  * @tparam ArmnnType The armnn data type for the input and expected-output data
75  * @param backends Backends to run test on
76  * @param floatInputData Input data given as vector of float
77  * @param floatExpectedOutputData Expected output data given as vector of float
78  * @param inputInfo Tensor info of inputs
79  * @param outputInfo Tensor info of outputs
80  * @param descriptor Activation descriptor
81  */
82 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
83 void ActivationEndToEndImpl(const std::vector<armnn::BackendId>& backends,
84  const std::vector<float>& floatInputData,
85  const std::vector<float>& floatExpectedOutputData,
86  const armnn::TensorInfo& inputInfo,
87  const armnn::TensorInfo& outputInfo,
88  const armnn::ActivationDescriptor& descriptor)
89 {
90  using namespace armnn;
91 
92  // Selectively quantizes/transforms float values to the needed data type
93  std::vector<T> inputData = armnnUtils::QuantizedVector<T>( floatInputData,
94  inputInfo.GetQuantizationScale(),
95  inputInfo.GetQuantizationOffset());
96  std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>( floatExpectedOutputData,
97  outputInfo.GetQuantizationScale(),
98  outputInfo.GetQuantizationOffset());
99 
100  INetworkPtr net = CreateActivationNetwork(inputInfo, outputInfo, descriptor);
101 
102  std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
103  std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
104 
105  float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);
106 
107  EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
108  inputTensorData,
109  expectedOutputTensorData,
110  backends,
111  tolerance);
112 }
113 
114 /** Executes an end to end test for Elu activation with specific input and expected-output data
115  *
116  * @tparam ArmnnType The armnn data type for the input and expected-output data
117  * @param backends The backends on which to run the test
118  */
119 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
120 void EluEndToEndTest(const std::vector<BackendId>& backends)
121 {
122  std::vector<float> floatInputData{ -2.0f, -1.0f, -0.0f, 0.0f,
123  1.0f, 2.0f, 3.0f, 4.0f };
124 
125  std::vector<float> floatExpectedOutputData{ -0.86466471676f, -0.63212055882f, -0.0f, 0.0f,
126  1.0f , 2.0f , 3.0f, 4.0f };
127 
128  float qScale = 1.0f;
129  int32_t qOffset = 0;
130  armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
131  armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
132 
134 
135  ActivationEndToEndImpl<ArmnnType>(backends,
136  floatInputData,
137  floatExpectedOutputData,
138  inputInfo,
139  outputInfo,
140  descriptor);
141 }
142 
143 /** Executes an end to end test for HardSwish activation with specific input and expected-output data
144  *
145  * @tparam ArmnnType The armnn data type for the input and expected-output data
146  * @param backends The backends on which to run the test
147  */
148 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
149 void HardSwishEndToEndTest(const std::vector<BackendId>& backends)
150 {
151  std::vector<float> floatInputData{ -2.0f, -1.0f, -0.5f, 0.0f,
152  1.0f, 2.0f, 3.0f, 4.0f };
153 
154  std::vector<float> floatExpectedOutputData{ -0.33333333333f, -0.33333333333f, -0.208333f, 0.0f,
155  0.66666666667f, 1.66666666667f, 3.0f , 4.0f };
156 
157  float qScale = 1.0f;
158  int32_t qOffset = 0;
159  armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
160  armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
161 
163 
164  ActivationEndToEndImpl<ArmnnType>(backends,
165  floatInputData,
166  floatExpectedOutputData,
167  inputInfo,
168  outputInfo,
169  descriptor);
170 }
171 
172 } // anonymous namespace
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
Copyright (c) 2021 ARM Limited and Contributors.
DataType
Definition: Types.hpp:35
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
constexpr char const * GetActivationFunctionAsCString(ActivationFunction activation)
Definition: TypesUtils.hpp:27
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:197
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:478
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
ActivationFunction
Definition: Types.hpp:73