//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Activation.hpp"

#include <boost/log/trivial.hpp>

#include <algorithm>
#include <cmath>

namespace armnn
{
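
// Computes the result of the given activation function for a single input value.
// The parameters a and b are only used by the functions that need them, for example
// the slope of LeakyReLu or the upper/lower bounds of BoundedReLu.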
float Activation(float in,
                 ActivationFunction function,
                 float a,
                 float b)
{
    float output;

    // Compute the result of the activation function.
    switch (function)
    {
        case ActivationFunction::Linear:
        {
            output = a * in + b;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            output = 1.f / (1.f + expf(-in));
            break;
        }
        case ActivationFunction::ReLu:
        {
            output = std::max(0.f, in);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            output = std::min(a, std::max(b, in));
            break;
        }
        case ActivationFunction::SoftReLu:
        {
            output = logf(1.0f + expf(in));
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            output = in > 0.0f ? in : (in * a);
            break;
        }
        case ActivationFunction::Abs:
        {
            output = in < 0 ? -in : in;
            break;
        }
        case ActivationFunction::Sqrt:
        {
            output = sqrtf(in);
            break;
        }
        case ActivationFunction::Square:
        {
            output = in * in;
            break;
        }
        case ActivationFunction::TanH:
        {
            output = a * tanhf(b * in);
            break;
        }
        default:
        {
            throw InvalidArgumentException("Unsupported activation function");
        }
    }

    return output;
}
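
// Applies the activation function element-wise over a whole tensor, reading
// values through the Decoder and writing results through the Encoder.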
void Activation(Decoder<float>& in,
                Encoder<float>& out,
                const TensorInfo& tensorInfo,
                ActivationFunction function,
                float a,
                float b)
{
    for (size_t i = 0; i < tensorInfo.GetNumElements(); i++)
    {
        out.Set(Activation(in.Get(), function, a, b));
        ++in;
        ++out;
    }
}
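
// Raw-pointer overload: applies the activation function element-wise over
// contiguous input and output buffers holding tensorInfo.GetNumElements() floats.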
void Activation(const float* in,
                float* out,
                const TensorInfo& tensorInfo,
                ActivationFunction function,
                float a,
                float b)
{
    for (size_t i = 0; i < tensorInfo.GetNumElements(); i++)
    {
        out[i] = Activation(in[i], function, a, b);
    }
}

} // namespace armnn