diff options
author | FrancisMurtagh <francis.murtagh@arm.com> | 2019-02-12 16:52:24 +0000 |
---|---|---|
committer | Francis Murtagh <francis.murtagh@arm.com> | 2019-02-12 16:58:26 +0000 |
commit | fb1437e86d8e01af9ee9cebe4c8cd9ff508ac779 (patch) | |
tree | 609fa13934dd10944efa6c43dc1f2966e82ba309 /src/armnn/test/QuantizerTest.cpp | |
parent | 8a4ca10cca8d5dadb52124187c209c955df7a8b6 (diff) | |
download | armnn-fb1437e86d8e01af9ee9cebe4c8cd9ff508ac779.tar.gz |
IVGCVSW-2614 Support static quantization of Permute
Change-Id: I4f208d18f7ac6bb04423cc26ecf61adf0ccc5d26
Signed-off-by: FrancisMurtagh <francis.murtagh@arm.com>
Diffstat (limited to 'src/armnn/test/QuantizerTest.cpp')
-rw-r--r-- | src/armnn/test/QuantizerTest.cpp | 81 |
1 file changed, 67 insertions, 14 deletions
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index a130c1ff80..319143ed5d 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -261,26 +261,26 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation) VisitLayersTopologically(quantizedNetwork.get(), validator); } -BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation) +class TestLeakyReLuActivationQuantization : public TestQuantization { - class TestLeakyReLuActivationQuantization : public TestQuantization +public: + virtual void VisitActivationLayer(const IConnectableLayer* layer, + const ActivationDescriptor& descriptor, + const char* name = nullptr) { - public: - virtual void VisitActivationLayer(const IConnectableLayer* layer, - const ActivationDescriptor& descriptor, - const char* name = nullptr) - { - TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); + TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); - BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8)); + BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8)); - BOOST_TEST((info.GetQuantizationOffset() == 64)); + BOOST_TEST((info.GetQuantizationOffset() == 64)); - // Based off current static value [-5.0f, 15.0f] - BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f); - } - }; + // Based off current static value [-5.0f, 15.0f] + BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f); + } +}; +BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation) +{ ActivationDescriptor descriptor; descriptor.m_Function = ActivationFunction::LeakyReLu; descriptor.m_A = 3.5f; @@ -766,5 +766,58 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax) VisitLayersTopologically(quantizedNetwork.get(), validator); } +BOOST_AUTO_TEST_CASE(QuantizePermute) +{ + + class TestPermuteQuantization : public TestLeakyReLuActivationQuantization + { + public: + virtual void VisitPermuteLayer(const IConnectableLayer* layer, + const PermuteDescriptor& desc, + const char* name = nullptr) + 
{ + TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); + + BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8)); + + BOOST_TEST((info.GetQuantizationOffset() == 64)); + + // Based off parent LeakyReLu [-5.f, 15.f] + BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f); + } + }; + + auto network = INetwork::Create(); + + TensorShape shape{1U}; + TensorInfo info(shape, DataType::Float32); + + PermuteDescriptor desc; + ActivationDescriptor activationDescriptor; + activationDescriptor.m_Function = ActivationFunction::LeakyReLu; + activationDescriptor.m_A = 3.5f; + activationDescriptor.m_B = -10.0f; + + // Add the layers + IConnectableLayer* input0 = network->AddInputLayer(0); + IConnectableLayer* activation = network->AddActivationLayer(activationDescriptor); + IConnectableLayer* permute = network->AddPermuteLayer(desc); + IConnectableLayer* output = network->AddOutputLayer(3); + + // Establish connections + input0->GetOutputSlot(0).Connect(activation->GetInputSlot(0)); + activation->GetOutputSlot(0).Connect(permute->GetInputSlot(0)); + permute->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + //Set TensorInfo + input0->GetOutputSlot(0).SetTensorInfo(info); + activation->GetOutputSlot(0).SetTensorInfo(info); + permute->GetOutputSlot(0).SetTensorInfo(info); + + auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork(); + TestPermuteQuantization validator; + VisitLayersTopologically(quantizedNetwork.get(), validator); +} + BOOST_AUTO_TEST_SUITE_END() } // namespace armnn |