about summary refs log tree commit diff
path: root/src/armnn/test/QuantizerTest.cpp
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-09-18 14:49:29 +0100
committerMatteo Martincigh <matteo.martincigh@arm.com>2019-09-18 14:33:36 +0000
commitbab9dc64cb4fad9cc0c4d48678f3e7f841b6504d (patch)
tree7f893b2a7a6684afdf700f92791e5843099150ba /src/armnn/test/QuantizerTest.cpp
parent657ab2d5a4c1b8c41145158a940e8893627bf578 (diff)
downloadarmnn-bab9dc64cb4fad9cc0c4d48678f3e7f841b6504d.tar.gz
IVGCVSW-3881 Add Quantizer support for SLICE
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> Change-Id: I72bc00888d416fee177ea2e6e5006f8ff04f612e
Diffstat (limited to 'src/armnn/test/QuantizerTest.cpp')
-rw-r--r-- src/armnn/test/QuantizerTest.cpp 55
1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 7a5d27bd52..d902b8df40 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1905,6 +1905,61 @@ BOOST_AUTO_TEST_CASE(QuantizeStack)
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
}
+// IVGCVSW-3881: verify that the network quantizer handles a Slice layer by
+// checking the quantization parameters it assigns to the layer's output
+// tensor, for both the QAsymm8 (default) and QSymm16 schemes.
+BOOST_AUTO_TEST_CASE(QuantizeSlice)
+{
+ // Visitor that asserts the expected (scale, offset) pair on the Slice
+ // layer's output tensor when the quantized network is traversed.
+ class TestSliceQuantization : public TestQuantization
+ {
+ public:
+ TestSliceQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+ : TestQuantization(inputShape, outputShape)
+ {}
+
+ TestSliceQuantization(const QuantizerOptions& options,
+ const TensorShape& inputShape,
+ const TensorShape& outputShape)
+ : TestQuantization(options, inputShape, outputShape)
+ {}
+
+ virtual void VisitSliceLayer(const IConnectableLayer* layer,
+ const SliceDescriptor& desc,
+ const char* name = nullptr)
+ {
+ const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
+
+ // Expected quantization parameters per scheme; presumably the
+ // quantizer's defaults for an unconstrained float range -- confirm
+ // against the values used by the sibling tests in this file.
+ const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
+ const OffsetScalePair qSymm16Params{ 15.0f / g_Symm16QuantizationBase, 0 };
+
+ TestQuantizationParams(info, qAsymm8Params, qSymm16Params);
+ }
+ };
+
+ // Build a minimal network: input -> slice -> output, all slots sharing the
+ // same 1-D, 3-element Float32 tensor info.
+ TensorShape shape{ 3 };
+ TensorInfo info(shape, DataType::Float32);
+
+ INetworkPtr network = INetwork::Create();
+
+ IConnectableLayer* inputLayer = network->AddInputLayer(0);
+ IConnectableLayer* sliceLayer = network->AddSliceLayer(SliceDescriptor());
+ IConnectableLayer* outputLayer = network->AddOutputLayer(0);
+
+ inputLayer->GetOutputSlot(0).Connect(sliceLayer->GetInputSlot(0));
+ sliceLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ inputLayer->GetOutputSlot(0).SetTensorInfo(info);
+ sliceLayer->GetOutputSlot(0).SetTensorInfo(info);
+
+ // Test QAsymm8 quantization (the quantizer's default options).
+ INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestSliceQuantization validatorQAsymm8(shape, shape);
+ VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
+
+ // Test QSymm16 quantization via explicit QuantizerOptions.
+ const QuantizerOptions options(DataType::QuantisedSymm16);
+ INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
+ TestSliceQuantization validatorQSymm16(options, shape, shape);
+ VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
+}
+
std::vector<uint8_t> SetupQuantize(float value)
{
armnn::TensorInfo inputInfo({ 1, 2, 2 }, armnn::DataType::Float32);