about summary refs log tree commit diff
diff options
context:
space:
mode:
author    FrancisMurtagh <francis.murtagh@arm.com>  2019-02-12 10:11:43 +0000
committer FrancisMurtagh <francis.murtagh@arm.com>  2019-02-12 10:11:43 +0000
commit    424951560f2948b49506f178352e788cbe680fd8 (patch)
tree      ac2b4d815f954ce799e85920648aec05a3f8bbda
parent    8c1701a2d9c1da0e1decb2afdc2093aa88810242 (diff)
download  armnn-424951560f2948b49506f178352e788cbe680fd8.tar.gz
IVGCVSW-2623 Support static quantization of DepthwiseConv2d
Change-Id: Iab0d5aed243aca921661e4d39770fe02b1330442
Signed-off-by: FrancisMurtagh <francis.murtagh@arm.com>
-rw-r--r--  src/armnn/QuantizerVisitor.cpp    29
-rw-r--r--  src/armnn/QuantizerVisitor.hpp     6
-rw-r--r--  src/armnn/StaticRangeVisitor.cpp  13
-rw-r--r--  src/armnn/StaticRangeVisitor.hpp   6
-rw-r--r--  src/armnn/test/QuantizerTest.cpp 100
5 files changed, 149 insertions, 5 deletions
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 08384e285d..ae0d4387b3 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -177,6 +177,35 @@ void QuantizerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
SetQuantizedInputConnections(layer, newLayer);
}
+void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ std::vector<uint8_t> weightsBacking;
+ ConstTensor qWeights = CreateQuantizedConst(weights, weightsBacking);
+
+ IConnectableLayer* newLayer;
+ if (biases.has_value())
+ {
+ std::vector<uint8_t> biasesBacking;
+ ConstTensor qBiases = CreateQuantizedConst(biases.value(), biasesBacking);
+
+ newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc,
+ qWeights,
+ qBiases,
+ name);
+ }
+ else
+ {
+ newLayer = m_QuantizedNetwork->AddDepthwiseConvolution2dLayer(desc, qWeights, name);
+ }
+
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
void QuantizerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index cf151baf3c..c55ef6dd11 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -58,6 +58,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override;
+
void VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 05117cd5cd..6eab20083c 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -72,6 +72,19 @@ void StaticRangeVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
SetRange(layer, 0, -15.0f, 15.0f);
}
+void StaticRangeVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ boost::ignore_unused(desc);
+ boost::ignore_unused(weights);
+ boost::ignore_unused(biases);
+ boost::ignore_unused(name);
+ SetRange(layer, 0, -15.0f, 15.0f);
+}
+
void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name)
diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp
index d834d0449d..2b0143766a 100644
--- a/src/armnn/StaticRangeVisitor.hpp
+++ b/src/armnn/StaticRangeVisitor.hpp
@@ -43,6 +43,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
+ const DepthwiseConvolution2dDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr) override;
+
void VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 24c130c372..ac9ea1d446 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -563,17 +563,21 @@ public:
// Based off current static value [-15.0f, 15.0f]
BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / 255.0f, 0.000001f);
- // Test weitghs
+ // Test weights
+ // Instantiate expected values
+ const float quantizationScale = 3.0f / 255.0f;
+ const float tolerance = 3.0f / 255.0f;
+ const int quantizationOffset = 85;
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
- BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
+ BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), quantizationScale, tolerance);
+ BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == quantizationOffset));
// Test biases
if (biases.has_value())
{
BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), 3.0f / 255.0f, 0.000001f);
- BOOST_TEST((biases.value().GetInfo().GetQuantizationOffset() == 85));
+ BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), quantizationScale, tolerance);
+ BOOST_TEST((biases.value().GetInfo().GetQuantizationOffset() == quantizationOffset));
}
}
};
@@ -629,6 +633,92 @@ BOOST_AUTO_TEST_CASE(QuantizeConvolution2dWithBiases)
TestQuantizeConvolution2d(true);
}
+class TestDepthwiseConv2dQuantization : public TestQuantization
+{
+public:
+ virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
+ const DepthwiseConvolution2dDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char *name = nullptr)
+ {
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+ BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((info.GetQuantizationOffset() == 128));
+
+ // Based off current static value [-15.0f, 15.0f]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / 255.0f, 0.000001f);
+
+ // Test weights
+ // Instantiate expected values
+ const float quantizationScale = 3.0f / 255.0f;
+ const float tolerance = 3.0f / 255.0f;
+ const int quantizationOffset = 85;
+ BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), quantizationScale, tolerance);
+ BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == quantizationOffset));
+
+ // Test biases
+ if (biases.has_value())
+ {
+ BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), quantizationScale, tolerance);
+ BOOST_TEST((biases.value().GetInfo().GetQuantizationOffset() == quantizationOffset));
+ }
+ }
+};
+
+void TestQuantizeDepthwiseConvolution2d(bool useBiases)
+{
+ auto network = INetwork::Create();
+
+ TensorShape shape{3U};
+ TensorInfo info(shape, DataType::Float32);
+
+ std::vector<float> weightsData{-1.0f, 1.5f, 2.0f};
+ ConstTensor weights(info, weightsData);
+
+ DepthwiseConvolution2dDescriptor descriptor;
+ descriptor.m_BiasEnabled = useBiases;
+
+ // Add the layers
+ IConnectableLayer* input0 = network->AddInputLayer(0);
+ IConnectableLayer* depthwiseConv2d;
+ if (useBiases)
+ {
+ std::vector<float> biasesData{-1.0f, 1.5f, 2.0f};
+ ConstTensor biases(info, biasesData);
+ depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights, biases);
+ }
+ else
+ {
+ depthwiseConv2d = network->AddDepthwiseConvolution2dLayer(descriptor, weights);
+ }
+ IConnectableLayer* output = network->AddOutputLayer(1);
+
+ // Establish connections
+ input0->GetOutputSlot(0).Connect(depthwiseConv2d->GetInputSlot(0));
+ depthwiseConv2d->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ //Set TensorInfo
+ input0->GetOutputSlot(0).SetTensorInfo(info);
+ depthwiseConv2d->GetOutputSlot(0).SetTensorInfo(info);
+
+ auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestDepthwiseConv2dQuantization validator;
+ VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeDepthwiseConvolution2d)
+{
+ TestQuantizeDepthwiseConvolution2d(false);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeDepthwiseConvolution2dWithBiases)
+{
+ TestQuantizeDepthwiseConvolution2d(true);
+}
+
class TestSoftmaxQuantization : public TestQuantization
{
public: