about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
authorNikhil Raj <nikhil.raj@arm.com>2019-09-18 17:16:31 +0100
committerNikhil Raj Arm <nikhil.raj@arm.com>2019-09-19 07:34:56 +0000
commite1c2d02343f0746950fb5bc2b3cd3a34537a1eee (patch)
tree93604eff3eed71cac68aed78e2b9db5b8e12c524 /src
parent42f9d9e8fdb15f418240a64a7b089df611a015a0 (diff)
downloadarmnn-e1c2d02343f0746950fb5bc2b3cd3a34537a1eee.tar.gz
IVGCVSW-3725 Adding quantization support for ArgMinMax
Change-Id: I7582a9ee36b4d1764a5a137cefe9b7b7dfe30254
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Diffstat (limited to 'src')
-rw-r--r--src/armnn/QuantizerVisitor.cpp9
-rw-r--r--src/armnn/QuantizerVisitor.hpp4
-rw-r--r--src/armnn/test/QuantizerTest.cpp72
3 files changed, 85 insertions, 0 deletions
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 5a86264efd..564b7bb871 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -136,6 +136,15 @@ void QuantizerVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const
SetQuantizedInputConnections(layer, newLayer);
}
+void QuantizerVisitor::VisitArgMinMaxLayer(const IConnectableLayer* layer,
+ const ArgMinMaxDescriptor& argMinMaxDescriptor,
+ const char* name)
+{
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddArgMinMaxLayer(argMinMaxDescriptor, name);
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
void QuantizerVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* layer,
const BatchNormalizationDescriptor& desc,
const ConstTensor& mean,
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 3a1e300b7f..db1a669ba8 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -40,6 +40,10 @@ public:
void VisitAdditionLayer(const IConnectableLayer* layer, const char* name = nullptr) override;
+ void VisitArgMinMaxLayer(const IConnectableLayer* layer,
+ const ArgMinMaxDescriptor& argMinMaxDescriptor,
+ const char* name = nullptr) override;
+
void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
const BatchNormalizationDescriptor& desc,
const ConstTensor& mean,
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index d902b8df40..a569c24aaf 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1348,6 +1348,78 @@ BOOST_AUTO_TEST_CASE(QuantizeAbs)
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
}
+BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
+{
+ class TestArgMinMaxQuantization : public TestQuantization
+ {
+ public:
+ TestArgMinMaxQuantization(const TensorShape& inputShape, const TensorShape& outputShape) :
+ TestQuantization(inputShape, outputShape) {}
+
+ TestArgMinMaxQuantization(const QuantizerOptions& options,
+ const TensorShape& inputShape,
+ const TensorShape& outputShape) :
+ TestQuantization(options, inputShape, outputShape)
+ {}
+
+ void VisitInputLayer(const IConnectableLayer* layer,
+ LayerBindingId id,
+ const char* name = nullptr) override
+ {}
+
+ void VisitOutputLayer(const IConnectableLayer* layer,
+ LayerBindingId id,
+ const char* name = nullptr) override
+ {}
+ void VisitArgMinMaxLayer(const IConnectableLayer* layer,
+ const ArgMinMaxDescriptor& argMinMaxDescriptor,
+ const char* name = nullptr) override
+ {
+ TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
+
+ TestQuantizationParams(outputInfo,
+ { 30.0f / g_Asymm8QuantizationBase, 128 },
+ { 15.0f / g_Symm16QuantizationBase, 0 });
+ }
+ };
+
+ INetworkPtr network = INetwork::Create();
+
+ const TensorShape inputShape{ 1, 1, 1, 5 };
+ const TensorShape outputShape{ 1, 1, 1 };
+
+ TensorInfo inputInfo(inputShape, DataType::Float32);
+ TensorInfo outputInfo(outputShape, DataType::Float32);
+
+ // Add the input layers
+ IConnectableLayer* input = network->AddInputLayer(0);
+
+ // Add the layer under test
+ ArgMinMaxDescriptor argMinMaxDescriptor;
+ argMinMaxDescriptor.m_Function = ArgMinMaxFunction::Max;
+ IConnectableLayer* argMinMaxLayer = network->AddArgMinMaxLayer(argMinMaxDescriptor);
+
+ // Add the output layers
+ IConnectableLayer* output = network->AddOutputLayer(1);
+
+ // Establish connections
+ input->GetOutputSlot(0).Connect(argMinMaxLayer->GetInputSlot(0));
+ argMinMaxLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Set tensor info
+ input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+ argMinMaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestArgMinMaxQuantization validatorQAsymm8(inputShape, outputShape);
+ VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
+
+ const QuantizerOptions options(DataType::QuantisedSymm16);
+ INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
+ TestArgMinMaxQuantization validatorQSymm16(options, inputShape, outputShape);
+ VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
+}
+
BOOST_AUTO_TEST_CASE(QuantizeConcat)
{
class TestConcatQuantization : public TestQuantization