From fa6e9e00d47761ddceac07482071519bccd46416 Mon Sep 17 00:00:00 2001 From: Jim Flynn Date: Wed, 13 Feb 2019 12:06:46 +0000 Subject: IVGCVSW-2617 Add static quantization of SpaceToBatch * Some refactor of the existing QuantizePermute test for reuse Change-Id: Ifaf1afc476bad348f260fa5340c0153e7ade2703 Signed-off-by: Jim Flynn --- src/armnn/QuantizerVisitor.cpp | 9 ++++ src/armnn/QuantizerVisitor.hpp | 4 ++ src/armnn/StaticRangeVisitor.cpp | 10 ++++ src/armnn/StaticRangeVisitor.hpp | 4 ++ src/armnn/test/QuantizerTest.cpp | 114 +++++++++++++++++++++++++++++---------- 5 files changed, 114 insertions(+), 27 deletions(-) diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp index df5dd59677..e808d4799d 100644 --- a/src/armnn/QuantizerVisitor.cpp +++ b/src/armnn/QuantizerVisitor.cpp @@ -215,6 +215,15 @@ void QuantizerVisitor::VisitPermuteLayer(const IConnectableLayer* layer, SetQuantizedInputConnections(layer, newLayer); } +void QuantizerVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, + const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, + const char* name) +{ + IConnectableLayer* newLayer = m_QuantizedNetwork->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name); + RecordLayer(layer, newLayer); + SetQuantizedInputConnections(layer, newLayer); +} + void QuantizerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer, const SoftmaxDescriptor& softmaxDescriptor, const char* name) diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp index 409d808c5b..a39e6f13ad 100644 --- a/src/armnn/QuantizerVisitor.hpp +++ b/src/armnn/QuantizerVisitor.hpp @@ -72,6 +72,10 @@ public: const PermuteDescriptor& permuteDescriptor, const char* name = nullptr) override; + void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, + const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, + const char* name = nullptr) override; + /// Extract the quantized network INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); } diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp index ecdc1ad957..6fc2bc6762 100644 --- a/src/armnn/StaticRangeVisitor.cpp +++ b/src/armnn/StaticRangeVisitor.cpp @@ -121,6 +121,16 @@ void StaticRangeVisitor::VisitPermuteLayer(const IConnectableLayer* layer, const char* name) { boost::ignore_unused(permuteDescriptor); + boost::ignore_unused(name); + ForwardParentParameters(layer); +} + +void StaticRangeVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, + const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, + const char* name) +{ + boost::ignore_unused(spaceToBatchNdDescriptor); + boost::ignore_unused(name); ForwardParentParameters(layer); } diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp index a69df8b705..ef3f4e4073 100644 --- a/src/armnn/StaticRangeVisitor.hpp +++ b/src/armnn/StaticRangeVisitor.hpp @@ -59,6 +59,10 @@ public: const PermuteDescriptor& permuteDescriptor, const char* name) override; + void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer, + const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor, + const char* name = nullptr) override; + void VisitSoftmaxLayer(const IConnectableLayer* layer, const SoftmaxDescriptor& softmaxDescriptor, const char* name = nullptr) override; diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index 319143ed5d..657a87abbc 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -277,6 +277,20 @@ public: // Based off 
current static value [-5.0f, 15.0f]
         BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
     }
+protected:
+    // Used by descendant classes that test layers
+    // which forward their parent layer's quantization settings
+    void CheckForwardedQuantizationSettings(const IConnectableLayer* layer)
+    {
+        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+        BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+
+        BOOST_TEST((info.GetQuantizationOffset() == 64));
+
+        // Based off parent LeakyReLu [-5.f, 15.f]
+        BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
+    }
 };
 
 BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
@@ -766,9 +780,45 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
     VisitLayersTopologically(quantizedNetwork.get(), validator);
 }
 
-BOOST_AUTO_TEST_CASE(QuantizePermute)
+IConnectableLayer* CreateStartOfLeakyReluNetwork(INetwork* network, const TensorInfo& info)
+{
+    ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
+    activationDescriptor.m_A = 3.5f;
+    activationDescriptor.m_B = -10.0f;
+
+    // Add the layers
+    IConnectableLayer* input0 = network->AddInputLayer(0);
+    IConnectableLayer* activation = network->AddActivationLayer(activationDescriptor);
+
+    // Establish connections
+    input0->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
+
+    // Set TensorInfo
+    input0->GetOutputSlot(0).SetTensorInfo(info);
+    activation->GetOutputSlot(0).SetTensorInfo(info);
+
+    return activation;
+}
+
+void CompleteLeakyReluNetwork(INetwork* network,
+                              IConnectableLayer* activation,
+                              IConnectableLayer* layerUnderTest,
+                              const TensorInfo& info)
 {
+    // Add the output layer
+    IConnectableLayer* output = network->AddOutputLayer(3);
+
+    // Establish connections
+    activation->GetOutputSlot(0).Connect(layerUnderTest->GetInputSlot(0));
+    layerUnderTest->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    // Set TensorInfo
+    layerUnderTest->GetOutputSlot(0).SetTensorInfo(info);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizePermute)
+{
     class TestPermuteQuantization : public TestLeakyReLuActivationQuantization
     {
     public:
@@ -776,46 +826,56 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
                                        const PermuteDescriptor& desc,
                                        const char* name = nullptr)
         {
-            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+            CheckForwardedQuantizationSettings(layer);
+        }
+    };
 
-            BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+    INetworkPtr network = INetwork::Create();
+
+    TensorShape shape{1U};
+    TensorInfo info(shape, DataType::Float32);
+
+    IConnectableLayer* activation = CreateStartOfLeakyReluNetwork(network.get(), info);
+
+    // Add the layer under test
+    PermuteDescriptor desc;
+    IConnectableLayer* permute = network->AddPermuteLayer(desc);
 
-            BOOST_TEST((info.GetQuantizationOffset() == 64));
+    CompleteLeakyReluNetwork(network.get(), activation, permute, info);
 
-            // Based off parent LeakyReLu [-5.f, 15.f]
-            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
+    auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestPermuteQuantization validator;
+    VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
+{
+    class TestSpaceToBatchQuantization : public TestLeakyReLuActivationQuantization
+    {
+    public:
+        virtual void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
+                                              const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+                                              const char* name = nullptr) override
+        {
+            CheckForwardedQuantizationSettings(layer);
         }
     };
 
-    
auto network = INetwork::Create(); + INetworkPtr network = INetwork::Create(); TensorShape shape{1U}; TensorInfo info(shape, DataType::Float32); - PermuteDescriptor desc; - ActivationDescriptor activationDescriptor; - activationDescriptor.m_Function = ActivationFunction::LeakyReLu; - activationDescriptor.m_A = 3.5f; - activationDescriptor.m_B = -10.0f; - - // Add the layers - IConnectableLayer* input0 = network->AddInputLayer(0); - IConnectableLayer* activation = network->AddActivationLayer(activationDescriptor); - IConnectableLayer* permute = network->AddPermuteLayer(desc); - IConnectableLayer* output = network->AddOutputLayer(3); + IConnectableLayer* activation = CreateStartOfLeakyReluNetwork(network.get(), info); - // Establish connections - input0->GetOutputSlot(0).Connect(activation->GetInputSlot(0)); - activation->GetOutputSlot(0).Connect(permute->GetInputSlot(0)); - permute->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + // Add the layer under test + SpaceToBatchNdDescriptor descriptor; + IConnectableLayer* spaceToBatch = network->AddSpaceToBatchNdLayer(descriptor); - //Set TensorInfo - input0->GetOutputSlot(0).SetTensorInfo(info); - activation->GetOutputSlot(0).SetTensorInfo(info); - permute->GetOutputSlot(0).SetTensorInfo(info); + CompleteLeakyReluNetwork(network.get(), activation, spaceToBatch, info); auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestPermuteQuantization validator; + TestSpaceToBatchQuantization validator; VisitLayersTopologically(quantizedNetwork.get(), validator); } -- cgit v1.2.1
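Note: for readers exercising this code path outside the Boost test harness, here is a minimal standalone sketch of the flow the new QuantizeSpaceToBatch test builds: Input -> LeakyReLu -> SpaceToBatchNd -> Output, then static quantization. Every armnn layer and quantizer call below is taken from the diff above; the header paths, the main() wrapper, and the return-value check are assumptions for illustration, not part of this change.

// Illustrative sketch only; mirrors the QuantizeSpaceToBatch test above.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/INetworkQuantizer.hpp> // assumed header location for INetworkQuantizer
#include <armnn/Tensor.hpp>

using namespace armnn;

int main()
{
    INetworkPtr network = INetwork::Create();

    // Single-element Float32 tensor, as in the tests above
    TensorShape shape{1U};
    TensorInfo info(shape, DataType::Float32);

    // LeakyReLu parent layer, mirroring CreateStartOfLeakyReluNetwork()
    ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
    activationDescriptor.m_A = 3.5f;
    activationDescriptor.m_B = -10.0f;

    IConnectableLayer* input      = network->AddInputLayer(0);
    IConnectableLayer* activation = network->AddActivationLayer(activationDescriptor);

    // The layer under test, with a default-constructed descriptor as in the test
    SpaceToBatchNdDescriptor descriptor;
    IConnectableLayer* spaceToBatch = network->AddSpaceToBatchNdLayer(descriptor);

    IConnectableLayer* output = network->AddOutputLayer(3);

    // Establish connections
    input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
    activation->GetOutputSlot(0).Connect(spaceToBatch->GetInputSlot(0));
    spaceToBatch->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Set TensorInfo
    input->GetOutputSlot(0).SetTensorInfo(info);
    activation->GetOutputSlot(0).SetTensorInfo(info);
    spaceToBatch->GetOutputSlot(0).SetTensorInfo(info);

    // StaticRangeVisitor::VisitSpaceToBatchNdLayer calls ForwardParentParameters(),
    // so after export the SpaceToBatchNd output carries the parent LeakyReLu
    // range [-5, 15]: scale 20/255, offset 64 (the values the tests assert)
    INetworkPtr quantized = INetworkQuantizer::Create(network.get())->ExportNetwork();

    return quantized ? 0 : 1;
}

Forwarding the parent's range is the right behaviour for this layer: SpaceToBatchNd only moves elements between the spatial and batch dimensions, so its output values can never fall outside the parent layer's [min, max].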