author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-10-03 13:26:30 +0100
committer Teresa Charlin <teresa.charlinreyes@arm.com>  2019-10-04 15:17:51 +0000
commit    d66d68b13fb309e8d4eac9435a58b89dd6a55158 (patch)
tree      f180e3254d351b2fc557f9da255a5c0e4b987855 /src/armnn
parent    e11ff89e92d2d4343a26f1b0d988dea795561da7 (diff)
download  armnn-d66d68b13fb309e8d4eac9435a58b89dd6a55158.tar.gz
IVGCVSW-3935 Add Quantizer support for INSTANCE_NORMALIZATION
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I175008c81da028fb5bdc71e0abff06bc6e58734c
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/QuantizerVisitor.cpp    9
-rw-r--r--  src/armnn/QuantizerVisitor.hpp    8
-rw-r--r--  src/armnn/test/QuantizerTest.cpp  53
3 files changed, 68 insertions, 2 deletions
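
The changes touch three files: QuantizerVisitor.cpp gains the new visit method, QuantizerVisitor.hpp declares the override, and QuantizerTest.cpp verifies the quantization parameters chosen for the layer. A minimal usage sketch mirroring the new test case (the include paths are assumptions, not taken from this patch):

    #include <armnn/ArmNN.hpp>              // assumed aggregate header
    #include <armnn/INetworkQuantizer.hpp>  // assumed include path

    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Float32 graph: Input -> InstanceNormalization -> Output
    IConnectableLayer* input    = network->AddInputLayer(0);
    IConnectableLayer* instNorm =
        network->AddInstanceNormalizationLayer(InstanceNormalizationDescriptor());
    IConnectableLayer* output   = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(instNorm->GetInputSlot(0));
    instNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    const TensorInfo info(TensorShape({ 1, 4, 4, 1 }), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    instNorm->GetOutputSlot(0).SetTensorInfo(info);

    // With this patch the quantizer can visit the instance norm layer.
    INetworkPtr quantized = INetworkQuantizer::Create(network.get())->ExportNetwork();
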
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 4989e4a88d..9819d71ea9 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -310,6 +310,15 @@ void QuantizerVisitor::VisitInputLayer(const IConnectableLayer *layer, LayerBind
}
}
+void QuantizerVisitor::VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
+ const InstanceNormalizationDescriptor& descriptor,
+ const char* name)
+{
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddInstanceNormalizationLayer(descriptor, name);
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
void QuantizerVisitor::VisitMeanLayer(const IConnectableLayer* layer,
const MeanDescriptor& meanDescriptor,
const char* name)
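
The new override follows the same three-step passthrough pattern as the quantizer's other layer visits: clone the layer into the quantized network (AddInstanceNormalizationLayer), record the mapping from the original layer (RecordLayer), and rewire the clone's inputs to the already-quantized producers (SetQuantizedInputConnections). The method is reached through armnn's ILayerVisitor dispatch; a self-contained sketch of that mechanism, assuming the LayerVisitorBase helper and its include path (none of this is part of the patch):

    #include <armnn/ArmNN.hpp>
    #include <armnn/LayerVisitorBase.hpp> // assumed include path

    using namespace armnn;

    // Minimal visitor that reacts only to instance normalization layers;
    // every other layer type falls through to the no-throw default.
    class InstanceNormCounter : public LayerVisitorBase<VisitorNoThrowPolicy>
    {
    public:
        void VisitInstanceNormalizationLayer(const IConnectableLayer* /*layer*/,
                                             const InstanceNormalizationDescriptor& /*descriptor*/,
                                             const char* /*name*/ = nullptr) override
        {
            ++m_Count;
        }

        unsigned int m_Count = 0;
    };

The validator class in the test below hooks in the same way: it overrides the instance norm visit method and asserts on the quantization parameters of the layer's output slot.
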
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index b2acc7c56a..d1c4375b59 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -89,6 +89,10 @@ public:
void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override;
+ void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
+ const InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
+ const char* name = nullptr) override;
+
void VisitMeanLayer(const IConnectableLayer* layer,
const MeanDescriptor& meanDescriptor,
const char* name = nullptr) override;
@@ -96,8 +100,8 @@ public:
void VisitMultiplicationLayer(const IConnectableLayer* layer,
const char* name = nullptr) override;
- void VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
- const armnn::NormalizationDescriptor& normalizationDescriptor,
+ void VisitNormalizationLayer(const IConnectableLayer* layer,
+ const NormalizationDescriptor& normalizationDescriptor,
const char* name = nullptr) override;
void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override;
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 6a217f3037..6f7c115164 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1009,6 +1009,59 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthwiseConvolution2dWithBiases)
TestQuantizeDepthwiseConvolution2d(true);
}
+BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
+{
+ class TestInstanceNormalizationQuantization : public TestQuantization
+ {
+ public:
+ TestInstanceNormalizationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+ : TestQuantization(inputShape, outputShape) {}
+
+ TestInstanceNormalizationQuantization(const QuantizerOptions& options,
+ const TensorShape& inputShape,
+ const TensorShape& outputShape)
+ : TestQuantization(options, inputShape, outputShape) {}
+
+ virtual void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
+ const InstanceNormalizationDescriptor& descriptor,
+ const char* name = nullptr)
+ {
+ const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
+
+ const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
+ const OffsetScalePair qSymm16Params{ 15.0f / g_Symm16QuantizationBase, 0 };
+
+ TestQuantizationParams(info, qAsymm8Params, qSymm16Params);
+ }
+ };
+
+ const TensorShape tensorShape{ 1, 4, 4, 1 };
+ const TensorInfo tensorInfo(tensorShape, DataType::Float32);
+
+ INetworkPtr network = INetwork::Create();
+
+ IConnectableLayer* inputLayer = network->AddInputLayer(0);
+ IConnectableLayer* instanceNormLayer = network->AddInstanceNormalizationLayer(InstanceNormalizationDescriptor());
+ IConnectableLayer* outputLayer = network->AddOutputLayer(0);
+
+ inputLayer->GetOutputSlot(0).Connect(instanceNormLayer->GetInputSlot(0));
+ instanceNormLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ instanceNormLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ // test QAsymm8 quantization
+ INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestInstanceNormalizationQuantization validatorQAsymm8(tensorShape, tensorShape);
+ VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
+
+ // test QSymm16 quantization
+ const QuantizerOptions options(DataType::QuantisedSymm16);
+ INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
+ TestInstanceNormalizationQuantization validatorQSymm16(options, tensorShape, tensorShape);
+ VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
+}
+
INetworkPtr CreateNetworkWithSoftmaxLayer(const SoftmaxDescriptor& descriptor, const TensorShape& shape)
{
INetworkPtr network = INetwork::Create();
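
The expected parameter pairs in the new test follow from a fixed output range assumed for the layer: the QAsymm8 pair (scale 30.0f / g_Asymm8QuantizationBase, offset 128) maps a [-15, 15] range onto 8-bit asymmetric values, and the QSymm16 pair (scale 15.0f / g_Symm16QuantizationBase, offset 0) maps the same range onto 16-bit symmetric values; the two bases are defined elsewhere in QuantizerTest.cpp as 255.0f and 32767.0f. A standalone check of the arithmetic, assuming those bases and that range:

    #include <cstdio>

    int main()
    {
        const float rangeMin = -15.0f, rangeMax = 15.0f; // assumed output range
        const float asymm8Base = 255.0f;                 // 2^8 - 1
        const float symm16Base = 32767.0f;               // 2^15 - 1

        // QAsymm8: the scale covers the full range width and the offset
        // maps rangeMin onto quantized 0.
        const float asymmScale  = (rangeMax - rangeMin) / asymm8Base;              // 30/255
        const int   asymmOffset = static_cast<int>(-rangeMin / asymmScale + 0.5f); // 128

        // QSymm16: symmetric around zero, so the scale covers |rangeMax|
        // and the offset is always 0.
        const float symmScale = rangeMax / symm16Base;                             // 15/32767

        std::printf("QAsymm8: scale=%.6f offset=%d\n", asymmScale, asymmOffset);
        std::printf("QSymm16: scale=%.6f offset=0\n", symmScale);
        return 0;
    }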