aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-10-22 14:22:17 +0100
committerÁron Virginás-Tar <aron.virginas-tar@arm.com>2019-10-22 14:59:06 +0000
commite6ed0ee9657af4e546e7e4798d1bbe658be3e336 (patch)
tree8ae0a087df2a23797a590050b320f9eb94986f8f
parent15effd8806e77c226b55e546bb44110177383fab (diff)
downloadarmnn-e6ed0ee9657af4e546e7e4798d1bbe658be3e336.tar.gz
IVGCVSW-3995 Add Quantizer support for ComparisonLayer
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Iab9b234d233591391a8e8626b7e8df3b7b0770a0
-rw-r--r--src/armnn/QuantizerVisitor.cpp9
-rw-r--r--src/armnn/QuantizerVisitor.hpp4
-rw-r--r--src/armnn/test/QuantizerTest.cpp57
3 files changed, 70 insertions, 0 deletions
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 7158c99995..8a3d4f2990 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -185,6 +185,15 @@ void QuantizerVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
SetQuantizedInputConnections(layer, newLayer);
}
+void QuantizerVisitor::VisitComparisonLayer(const IConnectableLayer* layer,
+ const ComparisonDescriptor& comparisonDescriptor,
+ const char* name)
+{
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddComparisonLayer(comparisonDescriptor, name);
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
void QuantizerVisitor::VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name)
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 89d1932a08..db0134d7a4 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -56,6 +56,10 @@ public:
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override;
+ void VisitComparisonLayer(const IConnectableLayer* layer,
+ const ComparisonDescriptor& comparisonDescriptor,
+ const char* name = nullptr) override;
+
void VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 101be1fb57..e7feef57c5 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1586,6 +1586,63 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
}
+BOOST_AUTO_TEST_CASE(QuantizeComparison)
+{
+ class TestComparisonQuantization : public TestQuantization
+ {
+ public:
+ TestComparisonQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+ : TestQuantization(inputShape, outputShape) {}
+
+ TestComparisonQuantization(const QuantizerOptions& options,
+ const TensorShape& inputShape,
+ const TensorShape& outputShape)
+ : TestQuantization(options, inputShape, outputShape) {}
+
+ void VisitComparisonLayer(const IConnectableLayer* layer,
+ const ComparisonDescriptor& descriptor,
+ const char* name = nullptr) override
+ {
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+ const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
+ const OffsetScalePair qSymm16Params{ 15.0f / g_Symm16QuantizationBase, 0 };
+
+ TestQuantizationParams(info, qAsymm8Params, qSymm16Params);
+ }
+ };
+
+ const TensorShape tensorShape{ 1u };
+ const TensorInfo tensorInfo(tensorShape, DataType::Float32);
+
+ INetworkPtr network = INetwork::Create();
+ ComparisonDescriptor descriptor(ComparisonOperation::LessOrEqual);
+
+ IConnectableLayer* inputLayer0 = network->AddInputLayer(0);
+ IConnectableLayer* inputLayer1 = network->AddInputLayer(1);
+ IConnectableLayer* comparisonLayer = network->AddComparisonLayer(descriptor);
+ IConnectableLayer* outputLayer = network->AddOutputLayer(0);
+
+ inputLayer0->GetOutputSlot(0).Connect(comparisonLayer->GetInputSlot(0));
+ inputLayer1->GetOutputSlot(0).Connect(comparisonLayer->GetInputSlot(1));
+ comparisonLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ inputLayer0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ comparisonLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ // test QAsymm8 quantization
+ INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestComparisonQuantization validatorQAsymm8(tensorShape, tensorShape);
+ VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
+
+ // test QuantisedSymm16 quantization
+ const QuantizerOptions options(DataType::QuantisedSymm16);
+ INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
+ TestComparisonQuantization validatorQSymm16(options, tensorShape, tensorShape);
+ VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
+}
+
BOOST_AUTO_TEST_CASE(QuantizeConcat)
{
class TestConcatQuantization : public TestQuantization