author    Teresa Charlin <teresa.charlinreyes@arm.com>  2019-04-24 10:17:46 +0100
committer Teresa Charlin <teresa.charlinreyes@arm.com>  2019-04-29 17:33:40 +0100
commit    18515e2e77d04e5065aa0d2e850972d67446260d (patch)
tree      4cd73c541a85af1dce65945f74f7b9b0436128f9
parent    e05e38e782f6c12301d069845fbfe353ab58da55 (diff)
download  armnn-18515e2e77d04e5065aa0d2e850972d67446260d.tar.gz
IVGCVSW-2983 activation layers support for QSymm16
!armnn:1023

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ie4e5c4e7cb9edec690235393fff0721fca5da6cc
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp               3
-rw-r--r--  src/backends/backendsCommon/test/ActivationTestImpl.hpp  354
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp           48
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                 5
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp              30
5 files changed, 435 insertions(+), 5 deletions(-)
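All of the new Int16 tests below quantize with qScale 0.1f and qOffset 0: QSymm16 is a symmetric scheme, so the zero point is fixed at 0 and only the scale varies. A minimal sketch of the round trip the tests rely on (standalone helpers for illustration, not ArmNN API; the exact clamping bounds are an assumption):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    int16_t QuantizeSymm16(float value, float scale)
    {
        // round(x / scale), clamped to the int16_t range; the zero point is always 0.
        int q = static_cast<int>(std::round(value / scale));
        return static_cast<int16_t>(std::min(32767, std::max(-32768, q)));
    }

    float DequantizeSymm16(int16_t value, float scale)
    {
        return static_cast<float>(value) * scale;
    }

At scale 0.1f this covers roughly [-3276.8, 3276.7], comfortably wider than any activation output exercised by these tests.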
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 66e30f0592..ca9d7d9c5e 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -301,7 +301,8 @@ void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
diff --git a/src/backends/backendsCommon/test/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
index ca6130299b..1a5bcd4fbb 100644
--- a/src/backends/backendsCommon/test/ActivationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
@@ -377,6 +377,14 @@ LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
workloadFactory, memoryManager, 4.0f, 3);
}
+LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
+ workloadFactory, memoryManager, 0.1f, 0);
+}
+
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -492,6 +500,342 @@ LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
}
+LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ReLuTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ // Calculate output values for input.
+ auto f = [](float value)
+ {
+ return std::fmax(0.0f, value);
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::ReLu,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> ReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> BoundedReLuTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+ const float a = 1.0f;
+ const float b = -1.0f;
+ // Calculate output values for input.
+ auto f = [a, b](float value)
+ {
+ return std::min(a, std::max(b, value));
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::BoundedReLu,
+ a,
+ b,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return BoundedReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SoftReLuTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ // Calculate output values for input.
+ auto f = [](float value)
+ {
+ return std::log(1.0f + std::exp(value));
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::SoftReLu,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> SoftReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> LeakyReLuTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ const float a = 0.01f;
+ // Calculate output values for input.
+ auto f = [a](float value)
+ {
+ return value > 0.0f ? value : (value * a);
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::LeakyReLu,
+ a,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
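One subtlety in the LeakyReLu expectations above: with a = 0.01f, the leak for an input like -1.0f is -0.01f, which is smaller than one quantization step at qScale 0.1f and therefore quantizes to 0. A quick standalone check (plain C++, not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float qScale = 0.1f;           // scale used by LeakyReLuInt16Test
        const float leaked = -1.0f * 0.01f;  // f(-1.0f) with a = 0.01f
        // round(-0.01 / 0.1) == 0: the leaked value collapses to zero at this scale.
        std::printf("%d\n", static_cast<int>(std::round(leaked / qScale)));
    }

The comparison still passes because the reference output goes through the same quantization as the workload output.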
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> AbsTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ // Calculate output values for input.
+ auto f = [](float value)
+ {
+ return std::abs(value);
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::Abs,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> AbsInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SqrtTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ // Calculate output values for input.
+ auto f = [](float value)
+ {
+ return std::sqrt(value);
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::Sqrt,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> SqrtInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SquareTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ // Calculate output values for input.
+ auto f = [](float value)
+ {
+        return std::pow(value, 2);
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::Square,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> SquareInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> TanhTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ const float a = 2.0f;
+ const float b = 3.0f;
+ // Calculate output values for input.
+ auto f = [a, b](float value)
+ {
+        return a * std::tanh(b * value);
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::TanH,
+ a,
+ b,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
+}
+
+LayerTestResult<int16_t, 4> TanhInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+
+
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
armnn::IWorkloadFactory& workloadFactory,
@@ -606,3 +950,13 @@ LayerTestResult<uint8_t,4> CompareActivationUint8Test(
return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}
+
+LayerTestResult<int16_t,4> CompareActivationInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ armnn::ActivationFunction f)
+{
+ return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
+ workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
+}
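All of the new *TestCommon templates above share one shape: build a float input, compute the reference output with std::transform, then delegate to SimpleActivationTest, which quantizes both tensors for the target data type. Covering another type is therefore a one-line instantiation; a hypothetical example (a Uint8 variant, not part of this patch, with an assumed offset of 128):

    LayerTestResult<uint8_t, 4> ReLuUint8Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
    {
        return ReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 128);
    }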
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index c748be1fc6..762e581ce4 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -494,6 +494,10 @@ LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 4> CompareConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -642,6 +646,40 @@ LayerTestResult<float, 4> CompareBoundedReLuTest(
float upperBound,
float lowerBound);
+
+LayerTestResult<int16_t, 4> ReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SoftReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> AbsInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SqrtInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SquareInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> TanhInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+
// Tests that the output should be identical to the input when the output dimensions match the input ones.
LayerTestResult<float, 4> ResizeBilinearNopTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -823,6 +861,12 @@ LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::ActivationFunction f);
+LayerTestResult<int16_t, 4> CompareActivationInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ armnn::ActivationFunction f);
+
LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -905,6 +949,10 @@ LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 3512d52acf..1b1f0ce1c6 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -167,9 +167,10 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
bool supported = true;
// Define supported types.
- std::array<DataType,2> supportedTypes = {
+ std::array<DataType,3> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
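A small design note on this hunk: with std::array the element count is part of the type, so every new data type means bumping the size by hand (2 to 3 here), while the equivalent list in WorkloadData.cpp above uses std::vector and sidesteps that coupling. A sketch of the vector form (illustrative only, not what the patch does):

    std::vector<armnn::DataType> supportedTypes =
    {
        armnn::DataType::Float32,
        armnn::DataType::QuantisedAsymm8,
        armnn::DataType::QuantisedSymm16
    };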
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index cbc56d14b7..04b215a927 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -167,9 +167,10 @@ ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test)
ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2d, AsymmetricNonSquarePooling2dTest)
ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2dUint8, AsymmetricNonSquarePooling2dUint8Test)
-// Activation
+// Linear Activation
ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest)
ARMNN_AUTO_TEST_CASE(ConstantLinearActivationUint8, ConstantLinearActivationUint8Test)
+ARMNN_AUTO_TEST_CASE(ConstantLinearActivationInt16, ConstantLinearActivationInt16Test)
// Normalization
ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
@@ -188,14 +189,39 @@ ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint8, Simple3dSoftmaxUint8Test, 1.0f)
ARMNN_AUTO_TEST_CASE(Simple4dSoftmax, Simple4dSoftmaxTest, 1.0f)
ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint8, Simple4dSoftmaxUint8Test, 1.0f)
-// Sigmoid
+// Sigmoid Activation
ARMNN_AUTO_TEST_CASE(SimpleSigmoid, SimpleSigmoidTest)
ARMNN_AUTO_TEST_CASE(SimpleSigmoidUint8, SimpleSigmoidUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleSigmoidInt16, SimpleSigmoidInt16Test)
+// BoundedReLU Activation
ARMNN_AUTO_TEST_CASE(ReLu1, BoundedReLuUpperAndLowerBoundTest)
ARMNN_AUTO_TEST_CASE(ReLu6, BoundedReLuUpperBoundOnlyTest)
ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest)
ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest)
+ARMNN_AUTO_TEST_CASE(BoundedReLuInt16, BoundedReLuInt16Test)
+
+// ReLU Activation
+ARMNN_AUTO_TEST_CASE(ReLuInt16, ReLuInt16Test)
+
+// SoftReLU Activation
+ARMNN_AUTO_TEST_CASE(SoftReLuInt16, SoftReLuInt16Test)
+
+// LeakyReLU Activation
+ARMNN_AUTO_TEST_CASE(LeakyReLuInt16, LeakyReLuInt16Test)
+
+// Abs Activation
+ARMNN_AUTO_TEST_CASE(AbsInt16, AbsInt16Test)
+
+// Sqrt Activation
+ARMNN_AUTO_TEST_CASE(SqrtInt16, SqrtInt16Test)
+
+// Square Activation
+ARMNN_AUTO_TEST_CASE(SquareInt16, SquareInt16Test)
+
+// Tanh Activation
+ARMNN_AUTO_TEST_CASE(TanhInt16, TanhInt16Test)
+
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)