path: root/src/armnn/test/QuantizerTest.cpp
author    David Monahan <david.monahan@arm.com>    2020-02-25 09:03:29 +0000
committer David Monahan <david.monahan@arm.com>    2020-02-28 09:47:17 +0000
commit    3b3c381963a1bfe12e083928a3abb5a9852b199b (patch)
tree      7f6b2d7416240490172e01070953d949d32b4407 /src/armnn/test/QuantizerTest.cpp
parent    dd3f71b64072c44cec65a7a883d0c3a29659645c (diff)
download  armnn-3b3c381963a1bfe12e083928a3abb5a9852b199b.tar.gz
IVGCVSW-4439: Adding Elu support to Activation
* Added CpuRef implementation
* Added Unit Tests
* Added Quantizer Test
* Enabled Tests for Neon and CL backends on fp32 only
* Added to Serializer

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: Ic23e1797dbc9352b40678c389d7fe2b836b582ea
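For reference, Elu (Exponential Linear Unit) passes positive inputs through unchanged and maps negative inputs onto a smooth exponential curve that saturates towards -alpha. A minimal standalone sketch of the function, not taken from this patch (alpha here plays the role of the descriptor's m_A parameter):

#include <cmath>

// Elu: identity for x > 0, alpha * (e^x - 1) for x <= 0.
float Elu(float x, float alpha = 1.0f)
{
    return x > 0.0f ? x : alpha * std::expm1(x); // std::expm1(x) computes e^x - 1 accurately near 0
}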
Diffstat (limited to 'src/armnn/test/QuantizerTest.cpp')
-rw-r--r--  src/armnn/test/QuantizerTest.cpp  56
1 file changed, 56 insertions, 0 deletions
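A note on the expected values in the new test below: as its comment states, they are derived from the quantizer's default static activation range of [-15.0f, 15.0f]. A rough sketch of that arithmetic, assuming the quantization base constants defined near the top of QuantizerTest.cpp (255.0f for the 8-bit asymmetric types, 127.0f for QSymmS8, 32767.0f for QSymmS16):

#include <cstdio>

int main()
{
    const float min = -15.0f;
    const float max =  15.0f;

    // Asymmetric types: the scale spans the whole [min, max] range; the offset
    // places the zero point (128 for unsigned 8-bit, 0 for signed 8-bit).
    const float asymmU8Scale = (max - min) / 255.0f;   // 30.0f / 255.0f, offset 128
    const float asymmS8Scale = (max - min) / 255.0f;   // 30.0f / 255.0f, offset 0

    // Symmetric types: the scale spans only [0, max]; the offset is always 0.
    const float symmS8Scale  = max / 127.0f;           // 15.0f / 127.0f
    const float symmS16Scale = max / 32767.0f;         // 15.0f / 32767.0f

    std::printf("%f %f %f %f\n", asymmU8Scale, asymmS8Scale, symmS8Scale, symmS16Scale);
    return 0;
}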
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index faadf24892..6d5d212fc9 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -707,6 +707,62 @@ BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
}
+
+BOOST_AUTO_TEST_CASE(QuantizeELuActivation)
+{
+ class TestEluActivationQuantization : public TestQuantization
+ {
+ public:
+ TestEluActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+ : TestQuantization(inputShape, outputShape) {}
+
+ TestEluActivationQuantization(const QuantizerOptions& options,
+ const TensorShape& inputShape,
+ const TensorShape& outputShape)
+ : TestQuantization(options, inputShape, outputShape) {}
+
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& descriptor,
+ const char* name = nullptr) override
+ {
+ boost::ignore_unused(descriptor, name);
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+ // Based off default static range [-15.0f, 15.0f]
+ TestQuantizationParams(
+ info, {30.0f / g_AsymmU8QuantizationBase, 128},
+ {30.0f / g_AsymmS8QuantizationBase, 0},
+ {15.0f / g_SymmS8QuantizationBase, 0},
+ {15.0f / g_SymmS16QuantizationBase, 0});
+ }
+ };
+
+ ActivationDescriptor descriptor;
+ descriptor.m_Function = ActivationFunction::Elu;
+
+ const TensorShape shape{1U};
+ INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
+
+ INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestEluActivationQuantization validatorQAsymmU8(shape, shape);
+ VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
+
+ const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
+ INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
+ TestEluActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
+ VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
+
+ const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
+ INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
+ TestEluActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
+ VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
+
+ const QuantizerOptions qSymmS16options(DataType::QSymmS16);
+ INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
+ TestEluActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
+ VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+}
+
BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
{
class TestBatchNormalizationQuantization : public TestQuantization