From 98180eff3c1feed19576d82ceac06d476235d973 Mon Sep 17 00:00:00 2001 From: Aron Virginas-Tar Date: Wed, 26 Jun 2019 15:02:47 +0100 Subject: IVGCVSW-3324 Add end-to-end tests for TransposeConvolution2d on CpuRef * Added one end-to-end test for all supported data types and data layout * Implemented RefLayerSupport::IsTransposeConvolution2dSupported() * Fixed formula used in TransposeConvolution2dLayer::InferOutputShapes() Signed-off-by: Aron Virginas-Tar Change-Id: If1ba3c226ecfa17f7fceffae857f39297c6433f2 --- .../TransposeConvolution2dEndToEndTestImpl.hpp | 153 +++++++++++++++++++++ src/backends/reference/RefLayerSupport.cpp | 46 +++++++ src/backends/reference/RefLayerSupport.hpp | 8 ++ src/backends/reference/RefWorkloadFactory.cpp | 4 + src/backends/reference/test/RefEndToEndTests.cpp | 38 +++++ 5 files changed, 249 insertions(+) create mode 100644 src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp (limited to 'src/backends') diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp new file mode 100644 index 0000000000..9d6312ea53 --- /dev/null +++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp @@ -0,0 +1,153 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include "QuantizeHelper.hpp" + +#include + +#include +#include + +#include + +#include + +#include +#include + +namespace +{ + +INetworkPtr CreateTransposeConvolution2dNetwork(const armnn::TransposeConvolution2dDescriptor& descriptor, + const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputInfo, + const armnn::ConstTensor& weights, + const armnn::Optional& biases) +{ + using namespace armnn; + + INetworkPtr network(INetwork::Create()); + IConnectableLayer* input = network->AddInputLayer(0, "input"); + IConnectableLayer* transposeConvolution2d = + network->AddTransposeConvolution2dLayer(descriptor, weights, biases, "transposeConvolution2d"); + IConnectableLayer* output = network->AddOutputLayer(0, "output"); + + Connect(input, transposeConvolution2d, inputInfo, 0, 0); + Connect(transposeConvolution2d, output, outputInfo, 0, 0); + + return network; +} + +} // anonymous namespace + +template +void TransposeConvolution2dEndToEnd(const std::vector& backends, + armnn::DataLayout dataLayout) +{ + using namespace armnn; + using T = ResolveType; + + constexpr unsigned int batches = 1u; + constexpr unsigned int channels = 1u; + + constexpr unsigned int wInput = 3u; + constexpr unsigned int hInput = wInput; + + constexpr unsigned int wOutput = 5u; + constexpr unsigned int hOutput = wOutput; + + constexpr unsigned int wWeights = 3u; + constexpr unsigned int hWeights = wWeights; + + TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, dataLayout); + TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, dataLayout); + TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, dataLayout); + + const float qScale = IsQuantizedType() ? 0.25f : 1.0f; + const int32_t qOffset = IsQuantizedType() ? 
50 : 0; + + TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset); + TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset); + TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset); + TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0); + + std::vector inputData = + { + 1.f, 1.f, 1.f, + 1.f, 1.f, 1.f, + 1.f, 1.f, 1.f + }; + + std::vector weightsData = + { + 1.f, 2.f, 3.f, + 4.f, 5.f, 6.f, + 7.f, 8.f, 9.f + }; + + std::vector biasesData = { 1.f }; + + std::vector expectedOutputData = + { + 6.f, 11.f, 6.f, 11.f, 6.f, + 11.f, 21.f, 11.f, 21.f, 11.f, + 6.f, 11.f, 6.f, 11.f, 6.f, + 11.f, 21.f, 11.f, 21.f, 11.f, + 6.f, 11.f, 6.f, 11.f, 6.f + }; + + TransposeConvolution2dDescriptor descriptor; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_StrideX = 2; + descriptor.m_StrideY = 2; + descriptor.m_BiasEnabled = true; + descriptor.m_DataLayout = dataLayout; + + // swizzle data if needed + if (dataLayout == armnn::DataLayout::NHWC) + { + constexpr size_t dataTypeSize = sizeof(float); + const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 }; + + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize); + inputData = tmp; + + tmp.resize(weightsData.size()); + armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize); + weightsData = tmp; + + tmp.resize(expectedOutputData.size()); + armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize); + expectedOutputData = tmp; + } + + // quantize data + std::vector qInputData = QuantizedVector(qScale, qOffset, inputData); + std::vector qWeightsData = QuantizedVector(qScale, qOffset, weightsData); + std::vector qExpectedOutputData = QuantizedVector(qScale, qOffset, expectedOutputData); + + using BT = ResolveType; + std::vector qBiasesData = 
QuantizedVector<BT>(qScale * qScale, 0, biasesData);
+
+    ConstTensor weights(weightsInfo, qWeightsData);
+    ConstTensor biases(biasesInfo, qBiasesData);
+
+    INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
+                                                              inputInfo,
+                                                              outputInfo,
+                                                              weights,
+                                                              Optional<ConstTensor>(biases));
+
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                { { 0, qInputData } },
+                                                { { 0, qExpectedOutputData } },
+                                                backends);
+}
\ No newline at end of file
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 06d9e1bff9..429993a55f 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1466,4 +1466,50 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
     return supported;
 }
 
+bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
+                                                        const TensorInfo& output,
+                                                        const TransposeConvolution2dDescriptor& descriptor,
+                                                        const TensorInfo& weights,
+                                                        const Optional<TensorInfo>& biases,
+                                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+
+    bool supported = true;
+
+    std::array<DataType,3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference TransposeConvolution2d: input is not a supported type.");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference TransposeConvolution2d: output is not a supported type.");
+
+    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
+                                  "Reference TransposeConvolution2d: weights is not a supported type.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference TransposeConvolution2d: input and output types mismatched.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
+                                  "Reference TransposeConvolution2d: input and weights types mismatched.");
+
+    if (biases.has_value())
+    {
+        std::array<DataType,2> biasesSupportedTypes = {
+            DataType::Float32,
+            DataType::Signed32
+        };
+        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
+                                      "Reference TransposeConvolution2d: biases is not a supported type.");
+    }
+
+    return supported;
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 5d241492c2..9c397fe66b 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -266,6 +266,14 @@ public:
                           const TensorInfo& alpha,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsTransposeConvolution2dSupported(
+        const TensorInfo& input,
+        const TensorInfo& output,
+        const TransposeConvolution2dDescriptor& descriptor,
+        const TensorInfo& weights,
+        const Optional<TensorInfo>& biases,
+        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 };
 
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 5ede8b3f02..d906f93a38 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -468,6 +468,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
     const TransposeConvolution2dQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
+    if (IsFloat16(info))
+    {
+        return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    }
     return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
 }
 
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 4d56952e27..a528a54cd2 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
 #include
 #include
 
@@ -930,4 +931,41 @@
Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
+// TransposeConvolution2d
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest)
+{
+    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
+        defaultBackends, armnn::DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
+{
+    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        defaultBackends, armnn::DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
+{
+    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        defaultBackends, armnn::DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest)
+{
+    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
+        defaultBackends, armnn::DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
+{
+    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        defaultBackends, armnn::DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
+{
+    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        defaultBackends, armnn::DataLayout::NHWC);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
-- 
cgit v1.2.1