about summary refs log tree commit diff
diff options
context:
space:
mode:
author FrancisMurtagh <francis.murtagh@arm.com> 2019-02-12 16:52:24 +0000
committer Francis Murtagh <francis.murtagh@arm.com> 2019-02-12 16:58:26 +0000
commit fb1437e86d8e01af9ee9cebe4c8cd9ff508ac779 (patch)
tree 609fa13934dd10944efa6c43dc1f2966e82ba309
parent 8a4ca10cca8d5dadb52124187c209c955df7a8b6 (diff)
download armnn-fb1437e86d8e01af9ee9cebe4c8cd9ff508ac779.tar.gz
IVGCVSW-2614 Support static quantization of Permute
Change-Id: I4f208d18f7ac6bb04423cc26ecf61adf0ccc5d26
Signed-off-by: FrancisMurtagh <francis.murtagh@arm.com>
-rw-r--r-- src/armnn/QuantizerVisitor.cpp | 10
-rw-r--r-- src/armnn/QuantizerVisitor.hpp | 10
-rw-r--r-- src/armnn/StaticRangeVisitor.cpp | 14
-rw-r--r-- src/armnn/StaticRangeVisitor.hpp | 7
-rw-r--r-- src/armnn/test/QuantizerTest.cpp | 81
5 files changed, 105 insertions, 17 deletions
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index af01092f8e..df5dd59677 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -205,6 +205,16 @@ void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer*
SetQuantizedInputConnections(layer, newLayer);
}
+
+void QuantizerVisitor::VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& permuteDescriptor,
+ const char* name)
+{
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddPermuteLayer(permuteDescriptor, name);
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
void QuantizerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 121ee1751f..409d808c5b 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -47,9 +47,9 @@ public:
const char* name = nullptr) override;
void VisitFullyConnectedLayer(const IConnectableLayer *layer,
- const FullyConnectedDescriptor&,
- const ConstTensor&,
- const Optional<ConstTensor>&,
+ const FullyConnectedDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
const char *name = nullptr) override;
void VisitConvolution2dLayer(const IConnectableLayer* layer,
@@ -68,6 +68,10 @@ public:
const SoftmaxDescriptor& softmaxDescriptor,
const char* name = nullptr) override;
+ void VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& permuteDescriptor,
+ const char* name = nullptr) override;
+
/// Extract the quantized network
INetworkPtr RetrieveFinalNetwork() { return std::move(m_QuantizedNetwork); }
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 2365e1b38c..ecdc1ad957 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -21,6 +21,12 @@ void StaticRangeVisitor::SetRange(const IConnectableLayer* layer, unsigned int o
m_RangeTracker.SetRange(layer, outputIdx, min, max);
}
+void StaticRangeVisitor::ForwardParentParameters(const IConnectableLayer* layer)
+{
+ const auto parentRange = m_RangeTracker.GetRange(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), 0);
+ SetRange(layer, 0, parentRange.first, parentRange.second);
+}
+
void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
SetRange(layer, 0, -20.f, 20.f);
@@ -110,6 +116,14 @@ void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer
SetRange(layer, 0, -15.0f, 15.0f);
}
+void StaticRangeVisitor::VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& permuteDescriptor,
+ const char* name)
+{
+ boost::ignore_unused(permuteDescriptor);
+ ForwardParentParameters(layer);
+}
+
void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp
index e1f68f3a31..a69df8b705 100644
--- a/src/armnn/StaticRangeVisitor.hpp
+++ b/src/armnn/StaticRangeVisitor.hpp
@@ -54,6 +54,11 @@ public:
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char *name) override;
+
+ void VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& permuteDescriptor,
+ const char* name) override;
+
void VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
const char* name = nullptr) override;
@@ -62,6 +67,8 @@ private:
/// Set the range for an output slot on a layer
void SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max);
+ void ForwardParentParameters(const IConnectableLayer* layer);
+
/// Mapping from a layer Guid to an array of ranges for outputs
RangeTracker& m_RangeTracker;
};
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index a130c1ff80..319143ed5d 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -261,26 +261,26 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
VisitLayersTopologically(quantizedNetwork.get(), validator);
}
-BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
+class TestLeakyReLuActivationQuantization : public TestQuantization
{
- class TestLeakyReLuActivationQuantization : public TestQuantization
+public:
+ virtual void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& descriptor,
+ const char* name = nullptr)
{
- public:
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& descriptor,
- const char* name = nullptr)
- {
- TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((info.GetQuantizationOffset() == 64));
+ BOOST_TEST((info.GetQuantizationOffset() == 64));
- // Based off current static value [-5.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
- }
- };
+ // Based off current static value [-5.0f, 15.0f]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
+ }
+};
+BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
+{
ActivationDescriptor descriptor;
descriptor.m_Function = ActivationFunction::LeakyReLu;
descriptor.m_A = 3.5f;
@@ -766,5 +766,58 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
VisitLayersTopologically(quantizedNetwork.get(), validator);
}
+BOOST_AUTO_TEST_CASE(QuantizePermute)
+{
+
+ class TestPermuteQuantization : public TestLeakyReLuActivationQuantization
+ {
+ public:
+ virtual void VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& desc,
+ const char* name = nullptr)
+ {
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+ BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+
+ BOOST_TEST((info.GetQuantizationOffset() == 64));
+
+ // Based off parent LeakyReLu [-5.f, 15.f]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
+ }
+ };
+
+ auto network = INetwork::Create();
+
+ TensorShape shape{1U};
+ TensorInfo info(shape, DataType::Float32);
+
+ PermuteDescriptor desc;
+ ActivationDescriptor activationDescriptor;
+ activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
+ activationDescriptor.m_A = 3.5f;
+ activationDescriptor.m_B = -10.0f;
+
+ // Add the layers
+ IConnectableLayer* input0 = network->AddInputLayer(0);
+ IConnectableLayer* activation = network->AddActivationLayer(activationDescriptor);
+ IConnectableLayer* permute = network->AddPermuteLayer(desc);
+ IConnectableLayer* output = network->AddOutputLayer(3);
+
+ // Establish connections
+ input0->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
+ activation->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
+ permute->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ //Set TensorInfo
+ input0->GetOutputSlot(0).SetTensorInfo(info);
+ activation->GetOutputSlot(0).SetTensorInfo(info);
+ permute->GetOutputSlot(0).SetTensorInfo(info);
+
+ auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestPermuteQuantization validator;
+ VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
BOOST_AUTO_TEST_SUITE_END()
} // namespace armnn