From 389aa70c8a24fa2faf33df5f8cd9a99b0fabe971 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Fri, 21 Jun 2019 15:25:19 +0100
Subject: IVGCVSW-3322 Add Quantizer support for TransposeConvolution2DLayer

Signed-off-by: Aron Virginas-Tar
Change-Id: I26997d7770585055b2b3256baad2800a4c5ed7e8
---
 src/armnn/test/QuantizerTest.cpp | 77 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)

diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 2792d5c483..4732da393f 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1715,6 +1715,83 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
 }
 
+void TestQuantizeTransposeConvolution2d(bool useBiases)
+{
+    class TestTransposeConvolution2dQuantization : public TestQuantization
+    {
+    public:
+        TestTransposeConvolution2dQuantization(const TensorShape& inputShape, const TensorShape& outputShape) :
+            TestQuantization(inputShape, outputShape)
+        {}
+
+        TestTransposeConvolution2dQuantization(const QuantizerOptions& options,
+                                               const TensorShape& inputShape,
+                                               const TensorShape& outputShape) :
+            TestQuantization(options, inputShape, outputShape)
+        {}
+
+        void VisitTransposeConvolution2dLayer(const IConnectableLayer *layer,
+                                              const TransposeConvolution2dDescriptor& descriptor,
+                                              const ConstTensor& weights,
+                                              const Optional<ConstTensor>& biases,
+                                              const char *name = nullptr) override
+        {
+            TestQuantizationOnLayersWithBiases(layer, weights, biases);
+        }
+    };
+
+    INetworkPtr network = INetwork::Create();
+
+    TensorShape shape{ 3 };
+    TensorInfo info(shape, DataType::Float32);
+
+    std::initializer_list<float> floatData{ -1.0f, 1.5f, 2.0f };
+    std::vector<float> weightsData(floatData);
+    ConstTensor weights(info, weightsData);
+
+    TransposeConvolution2dDescriptor descriptor;
+    descriptor.m_BiasEnabled = useBiases;
+
+    // construct network
+    IConnectableLayer* input = network->AddInputLayer(0);
+    Optional<ConstTensor> optionalBiases;
+    std::vector<float> biasesData(floatData);
+    if (useBiases)
+    {
+        ConstTensor biases(info, biasesData);
+        optionalBiases = Optional<ConstTensor>(biases);
+    }
+    IConnectableLayer* transposeConv2d = network->AddTransposeConvolution2dLayer(descriptor, weights, optionalBiases);
+    IConnectableLayer* output = network->AddOutputLayer(1);
+
+    input->GetOutputSlot(0).Connect(transposeConv2d->GetInputSlot(0));
+    transposeConv2d->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(info);
+    transposeConv2d->GetOutputSlot(0).SetTensorInfo(info);
+
+    // test QAsymm8 quantization
+    INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestTransposeConvolution2dQuantization validatorQAsymm8(shape, shape);
+    VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
+
+    // test QSymm16 quantization
+    const QuantizerOptions options(DataType::QuantisedSymm16);
+    INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
+    TestTransposeConvolution2dQuantization validatorQSymm16(options, shape, shape);
+    VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeTransposeConvolution2d)
+{
+    TestQuantizeTransposeConvolution2d(false);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeTransposeConvolution2dWithBiases)
+{
+    TestQuantizeTransposeConvolution2d(true);
+}
+
 std::vector<float> SetupQuantize(float value)
 {
     armnn::TensorInfo inputInfo({ 1, 2, 2 }, armnn::DataType::Float32);
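
For readers who want this flow outside the test harness, below is a minimal
standalone sketch of the quantization path the test above exercises. The
network construction and quantizer calls mirror the test code in the hunk;
the header paths, the main() wrapper, and the EmptyOptional() no-bias
shortcut are assumptions for the ArmNN release this patch targets, not part
of the patch itself.

    // Minimal sketch (assumed headers): quantize a Float32 network to
    // QAsymm8 (the default) and to QSymm16 via QuantizerOptions.
    #include <armnn/ArmNN.hpp>
    #include <armnn/Optional.hpp>
    #include <armnnQuantizer/INetworkQuantizer.hpp>

    #include <vector>

    int main()
    {
        using namespace armnn;

        // Float32 network: input -> transpose convolution (no bias) -> output.
        INetworkPtr network = INetwork::Create();

        TensorInfo info(TensorShape({ 3 }), DataType::Float32);
        std::vector<float> weightsData{ -1.0f, 1.5f, 2.0f };
        ConstTensor weights(info, weightsData);

        TransposeConvolution2dDescriptor descriptor;
        descriptor.m_BiasEnabled = false;

        IConnectableLayer* input  = network->AddInputLayer(0);
        IConnectableLayer* conv   =
            network->AddTransposeConvolution2dLayer(descriptor, weights, EmptyOptional());
        IConnectableLayer* output = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
        conv->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input->GetOutputSlot(0).SetTensorInfo(info);
        conv->GetOutputSlot(0).SetTensorInfo(info);

        // QAsymm8 is the quantizer's default export target...
        INetworkPtr qAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();

        // ...QSymm16 is requested through QuantizerOptions, as in the test.
        const QuantizerOptions options(DataType::QuantisedSymm16);
        INetworkPtr qSymm16 =
            INetworkQuantizer::Create(network.get(), options)->ExportNetwork();

        return (qAsymm8 && qSymm16) ? 0 : 1;
    }

In the releases around this patch the quantizer ships as its own library, so
building the sketch would presumably link against armnnQuantizer in addition
to the core armnn library.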