From 735a450d3b53a2d745b9a7a6d85747e25ec37ede Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Wed, 26 Jun 2019 15:02:47 +0100
Subject: IVGCVSW-3320 Add reference workload support for TransposeConvolution2dLayer

Signed-off-by: Aron Virginas-Tar
Change-Id: Icc64f8148c9d8a0d14d772e6e4e7865e70585cd9
---
 .../backendsCommon/test/CommonTestUtils.cpp      |  18 +
 .../backendsCommon/test/CommonTestUtils.hpp      |   6 +
 src/backends/backendsCommon/test/LayerTests.cpp  | 407 +++++++++++++++++
 src/backends/backendsCommon/test/LayerTests.hpp  | 154 +++++++
 .../test/TransposeConvolution2dTestImpl.hpp      | 498 +++++++++++++++++++++
 5 files changed, 1083 insertions(+)
 create mode 100644 src/backends/backendsCommon/test/TransposeConvolution2dTestImpl.hpp

diff --git a/src/backends/backendsCommon/test/CommonTestUtils.cpp b/src/backends/backendsCommon/test/CommonTestUtils.cpp
index 950b939d71..80512e290a 100644
--- a/src/backends/backendsCommon/test/CommonTestUtils.cpp
+++ b/src/backends/backendsCommon/test/CommonTestUtils.cpp
@@ -50,3 +50,21 @@ armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& bac
 
     return backendObjPtr;
 }
+
+armnn::TensorShape MakeTensorShape(unsigned int batches,
+                                   unsigned int channels,
+                                   unsigned int height,
+                                   unsigned int width,
+                                   armnn::DataLayout layout)
+{
+    using namespace armnn;
+    switch (layout)
+    {
+        case DataLayout::NCHW:
+            return TensorShape{ batches, channels, height, width };
+        case DataLayout::NHWC:
+            return TensorShape{ batches, height, width, channels };
+        default:
+            throw InvalidArgumentException(std::string("Unsupported data layout: ") + GetDataLayoutName(layout));
+    }
+}
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp
index 03c975540a..58bd6b197f 100644
--- a/src/backends/backendsCommon/test/CommonTestUtils.hpp
+++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp
@@ -68,3 +68,9 @@ armnn::SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(armnn::SubgraphView:
                                                             armnn::SubgraphView::Layers&& layers);
 
 armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId);
+
+armnn::TensorShape MakeTensorShape(unsigned int batches,
+                                   unsigned int channels,
+                                   unsigned int height,
+                                   unsigned int width,
+                                   armnn::DataLayout layout);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index a625097fdb..ca39438fbf 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -45,6 +45,7 @@
 #include "DebugTestImpl.hpp"
 #include "DequantizeTestImpl.hpp"
 #include "QuantizeTestImpl.hpp"
+#include "TransposeConvolution2dTestImpl.hpp"
 
 // 3-channel 16x8 image used as common input data for a number of Conv2d tests.
 static std::vector<float> ConvInput3x8x16({
@@ -9643,3 +9644,409 @@ LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
 {
     return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
 }
+
+//
+// TransposeConvolution2d
+//
+
+// Simple biased
+LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+// Simple unbiased
+LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+// Padded biased
+LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+// Padded unbiased
+LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+// Strided biased
+LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        true,
+        armnn::DataLayout::NHWC);
+}
+
+// Strided unbiased
+LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+        workloadFactory,
+        memoryManager,
+        false,
+        armnn::DataLayout::NHWC);
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 10bc00f83b..b225e4d655 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -3760,3 +3760,157 @@ template LayerTestResult(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+
+//
+// TransposeConvolution2d
+//
+
+// Simple biased
+LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+// Simple unbiased
+LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+// Padded biased
+LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+// Padded unbiased
+LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+// Strided biased
+LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+// Strided unbiased
+LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dTestImpl.hpp
new file mode 100644
index 0000000000..3bbd5d6770
--- /dev/null
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dTestImpl.hpp
@@ -0,0 +1,498 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+#include
+#include
+#include
+
+namespace
+{
+
+template<typename T>
+using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
+
+template<typename T>
+void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
+{
+    if (data.first.GetNumElements() > data.second.size())
+    {
+        throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
+            std::to_string(data.first.GetNumElements()) + " but got " + std::to_string(data.second.size()));
+    }
+}
+
+template<typename T, typename BT>
+void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                    const armnn::TransposeConvolution2dDescriptor& descriptor,
+                                    const TensorData<T>& input,
+                                    TensorData<T>& output,
+                                    const TensorData<T>& weights,
+                                    const armnn::Optional<TensorData<BT>>& biases)
+{
+    using namespace armnn;
+
+    VerifyInputTensorData(input, "input");
+    VerifyInputTensorData(weights, "weights");
+
+    if (descriptor.m_BiasEnabled)
+    {
+        if (!biases.has_value())
+        {
+            throw InvalidArgumentException("Bias enabled but no bias data provided");
+        }
+        VerifyInputTensorData(biases.value(), "biases");
+    }
+
+    // set up weights
+    ScopedCpuTensorHandle weightsTensor(weights.first);
+
+    TransposeConvolution2dQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = descriptor;
+    queueDescriptor.m_Weight = &weightsTensor;
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
+
+    std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
+    if (descriptor.m_BiasEnabled)
+    {
+        // set up biases
+        biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
+        queueDescriptor.m_Bias = biasesTensor.get();
+
+        AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
+    }
+
+    // set up input and output handles
+    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(input.first);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);
+
+    // set up workload
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
+    AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
+
+    std::unique_ptr<IWorkload> workload =
+        workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.second.data());
+
+    ExecuteWorkload(*workload, nullptr);
+
+    // copy output
+    output.second = std::vector<T>(output.first.GetNumElements(), 0.0f);
+    CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
+}
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> TransposeConvolution2dTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TransposeConvolution2dDescriptor& descriptor,
+    armnn::TensorInfo& inputInfo,
+    const std::vector<float>& inputData,
+    armnn::TensorInfo& outputInfo,
+    const std::vector<float>& expectedOutputData,
+    armnn::TensorInfo& weightsInfo,
+    const std::vector<float>& weightsData,
+    armnn::TensorInfo& biasesInfo,
+    const std::vector<float>& biasesData)
+{
+    using namespace armnn;
+
+    // set up quantization parameters
+    if (armnn::IsQuantizedType<T>())
+    {
+        constexpr float qScale = 0.25f;
+        constexpr int32_t qOffset = 50;
+
+        inputInfo.SetQuantizationScale(qScale);
+        inputInfo.SetQuantizationOffset(qOffset);
+
+        outputInfo.SetQuantizationScale(qScale);
+        outputInfo.SetQuantizationOffset(qOffset);
+
+        weightsInfo.SetQuantizationScale(qScale);
+        weightsInfo.SetQuantizationOffset(qOffset);
+
+        biasesInfo.SetQuantizationScale(qScale * qScale);
+        biasesInfo.SetQuantizationOffset(0);
+    }
+
+    // set up input
+    TensorData<T> input =
+    {
+        inputInfo,
+        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), inputData)
+    };
+
+    // set up weights
+    TensorData<T> weights =
+    {
+        weightsInfo,
+        QuantizedVector<T>(weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(), weightsData)
+    };
+
+    // set up biases
+    using BT = armnn::ResolveType<ArmnnBType>;
+    Optional<TensorData<BT>> optionalBiases;
+    if (descriptor.m_BiasEnabled)
+    {
+        TensorData<BT> biases =
+        {
+            biasesInfo,
+            QuantizedVector<BT>(biasesInfo.GetQuantizationScale(), biasesInfo.GetQuantizationOffset(), biasesData)
+        };
+
+        optionalBiases = Optional<TensorData<BT>>(biases);
+    }
+
+    // set up output
+    TensorData<T> output = { outputInfo, {} };
+
+    // execute test
+    TransposeConvolution2dTestImpl(workloadFactory,
+                                   memoryManager,
+                                   descriptor,
+                                   input,
+                                   output,
+                                   weights,
+                                   optionalBiases);
+
+    // construct result object
+    LayerTestResult<T, 4> testResult(outputInfo);
+    testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
+    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
+                                                 QuantizedVector<T>(outputInfo.GetQuantizationScale(),
+                                                                    outputInfo.GetQuantizationOffset(),
+                                                                    expectedOutputData));
+
+    return testResult;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleTransposeConvolution2dTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled,
+    const armnn::DataLayout layout)
+{
+    using namespace armnn;
+
+    constexpr unsigned int batches = 1u;
+    constexpr unsigned int channels = 1u;
+
+    constexpr unsigned int wInput = 3u;
+    constexpr unsigned int hInput = wInput;
+
+    constexpr unsigned int wOutput = 5u;
+    constexpr unsigned int hOutput = wOutput;
+
+    constexpr unsigned int wWeights = 3u;
+    constexpr unsigned int hWeights = wWeights;
+
+    TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, layout);
+    TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
+    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
+
+    TensorInfo inputInfo(inputShape, ArmnnType);
+    TensorInfo outputInfo(outputShape, ArmnnType);
+    TensorInfo weightsInfo(weightsShape, ArmnnType);
+    TensorInfo biasesInfo({ channels }, ArmnnBType);
+
+    std::vector<float> inputData =
+    {
+        1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f
+    };
+
+    std::vector<float> weightsData =
+    {
+        1.f, 2.f, 3.f,
+        4.f, 5.f, 6.f,
+        7.f, 8.f, 9.f
+    };
+
+    std::vector<float> biasesData = { 1.f };
+
+    std::vector<float> expectedOutputData =
+    {
+        1.f, 3.f, 6.f, 5.f, 3.f,
+        5.f, 12.f, 21.f, 16.f, 9.f,
+        12.f, 27.f, 45.f, 33.f, 18.f,
+        11.f, 24.f, 39.f, 28.f, 15.f,
+        7.f, 15.f, 24.f, 17.f, 9.f
+    };
+
+    if (biasEnabled)
+    {
+        // apply bias to expected output data
+        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
+                       [&](float f) -> float { return f + biasesData[0]; });
+    }
+
+    TransposeConvolution2dDescriptor descriptor;
+    descriptor.m_StrideX = 1;
+    descriptor.m_StrideY = 1;
+    descriptor.m_BiasEnabled = biasEnabled;
+    descriptor.m_DataLayout = layout;
+
+    // swizzle data if needed
+    if (layout == armnn::DataLayout::NHWC)
+    {
+        constexpr size_t dataTypeSize = sizeof(float);
+        const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
+
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
+        inputData = tmp;
+
+        tmp.resize(weightsData.size());
+        armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
+        weightsData = tmp;
+
+        tmp.resize(expectedOutputData.size());
+        armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
+        expectedOutputData = tmp;
+    }
+
+    return TransposeConvolution2dTestImpl<ArmnnType, ArmnnBType>(workloadFactory,
+                                                                 memoryManager,
+                                                                 descriptor,
+                                                                 inputInfo,
+                                                                 inputData,
+                                                                 outputInfo,
+                                                                 expectedOutputData,
+                                                                 weightsInfo,
+                                                                 weightsData,
+                                                                 biasesInfo,
+                                                                 biasesData);
+}
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> PaddedTransposeConvolution2dTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled,
+    const armnn::DataLayout layout)
+{
+    using namespace armnn;
+
+    constexpr unsigned int batches = 1u;
+    constexpr unsigned int channels = 1u;
+
+    constexpr unsigned int wInput = 4u;
+    constexpr unsigned int hInput = wInput;
+
+    constexpr unsigned int wOutput = 2u;
+    constexpr unsigned int hOutput = wOutput;
+
+    constexpr unsigned int wWeights = 3u;
+    constexpr unsigned int hWeights = wWeights;
+
+    TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, layout);
+    TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
+    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
+
+    TensorInfo inputInfo(inputShape, ArmnnType);
+    TensorInfo outputInfo(outputShape, ArmnnType);
+    TensorInfo weightsInfo(weightsShape, ArmnnType);
+    TensorInfo biasesInfo({ channels }, ArmnnBType);
+
+    std::vector<float> inputData =
+    {
+        1.f, 3.f, 2.f, 1.f,
+        1.f, 3.f, 3.f, 1.f,
+        2.f, 1.f, 1.f, 3.f,
+        3.f, 2.f, 3.f, 3.f
+    };
+
+    std::vector<float> weightsData =
+    {
+        1.f, 2.f, 3.f,
+        0.f, 1.f, 0.f,
+        2.f, 1.f, 2.f
+    };
+
+    std::vector<float> biasesData = { 1.f };
+
+    std::vector<float> expectedOutputData =
+    {
+        21.f, 21.f,
+        28.f, 27.f
+    };
+
+    if (biasEnabled)
+    {
+        // apply bias to expected output data
+        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
+                       [&](float f) -> float { return f + biasesData[0]; });
+    }
+
+    TransposeConvolution2dDescriptor descriptor;
+    descriptor.m_PadLeft = 2;
+    descriptor.m_PadRight = 2;
+    descriptor.m_PadTop = 2;
+    descriptor.m_PadBottom = 2;
+    descriptor.m_StrideX = 1;
+    descriptor.m_StrideY = 1;
+    descriptor.m_BiasEnabled = biasEnabled;
+    descriptor.m_DataLayout = layout;
+
+    // swizzle data if needed
+    if (layout == armnn::DataLayout::NHWC)
+    {
+        constexpr size_t dataTypeSize = sizeof(float);
+        const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
+
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
+        inputData = tmp;
+
+        tmp.resize(weightsData.size());
+        armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
+        weightsData = tmp;
+
+        tmp.resize(expectedOutputData.size());
+        armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
+        expectedOutputData = tmp;
+    }
+
+    return TransposeConvolution2dTestImpl<ArmnnType, ArmnnBType>(workloadFactory,
+                                                                 memoryManager,
+                                                                 descriptor,
+                                                                 inputInfo,
+                                                                 inputData,
+                                                                 outputInfo,
+                                                                 expectedOutputData,
+                                                                 weightsInfo,
+                                                                 weightsData,
+                                                                 biasesInfo,
+                                                                 biasesData);
+}
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedTransposeConvolution2dTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled,
+    const armnn::DataLayout layout)
+{
+    using namespace armnn;
+
+    constexpr unsigned int batches = 1u;
+    constexpr unsigned int channels = 1u;
+
+    constexpr unsigned int wInput = 3u;
+    constexpr unsigned int hInput = wInput;
+
+    constexpr unsigned int wOutput = 7u;
+    constexpr unsigned int hOutput = wOutput;
+
+    constexpr unsigned int wWeights = 3u;
+    constexpr unsigned int hWeights = wWeights;
+
+    TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, layout);
+    TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
+    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
+
+    TensorInfo inputInfo(inputShape, ArmnnType);
+    TensorInfo outputInfo(outputShape, ArmnnType);
+    TensorInfo weightsInfo(weightsShape, ArmnnType);
+    TensorInfo biasesInfo({ channels }, ArmnnBType);
+
+    std::vector<float> inputData =
+    {
+        1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f
+    };
+
+    std::vector<float> weightsData =
+    {
+        1.f, 2.f, 3.f,
+        4.f, 5.f, 6.f,
+        7.f, 8.f, 9.f
+    };
+
+    std::vector<float> biasesData = { 1.f };
+
+    std::vector<float> expectedOutputData =
+    {
+        1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
+        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
+        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
+        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
+        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
+        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
+        7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
+    };
+
+    if (biasEnabled)
+    {
+        // apply bias to expected output data
+        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
+                       [&](float f) -> float { return f + biasesData[0]; });
+    }
+
+    TransposeConvolution2dDescriptor descriptor;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 2;
+    descriptor.m_BiasEnabled = biasEnabled;
+    descriptor.m_DataLayout = layout;
+
+    // swizzle data if needed
+    if (layout == armnn::DataLayout::NHWC)
+    {
+        constexpr size_t dataTypeSize = sizeof(float);
+        const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
+
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
+        inputData = tmp;
+
+        tmp.resize(weightsData.size());
+        armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
+        weightsData = tmp;
+
+        tmp.resize(expectedOutputData.size());
+        armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
+        expectedOutputData = tmp;
+    }
+
+    return TransposeConvolution2dTestImpl<ArmnnType, ArmnnBType>(workloadFactory,
+                                                                 memoryManager,
+                                                                 descriptor,
+                                                                 inputInfo,
+                                                                 inputData,
+                                                                 outputInfo,
+                                                                 expectedOutputData,
+                                                                 weightsInfo,
+                                                                 weightsData,
+                                                                 biasesInfo,
+                                                                 biasesData);
+}
\ No newline at end of file
-- 
cgit v1.2.1
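
Not part of the patch: below is a minimal standalone sketch that recomputes the hard-coded expected values of the simple, unbiased 3x3 -> 5x5 test case above, assuming the textbook scatter-add formulation of a transposed convolution (each input element adds a scaled copy of the kernel into the output window anchored at its strided position). It uses only the sizes and numbers taken from SimpleTransposeConvolution2dTestImpl and does not call any Arm NN API.

#include <iostream>
#include <vector>

int main()
{
    const unsigned int inputSize  = 3;
    const unsigned int kernelSize = 3;
    const unsigned int stride     = 1;
    const unsigned int outputSize = (inputSize - 1) * stride + kernelSize; // 5

    // Input of all ones and the 1..9 kernel, as in the simple test case.
    const std::vector<float> input(inputSize * inputSize, 1.0f);
    const std::vector<float> kernel = { 1.f, 2.f, 3.f,
                                        4.f, 5.f, 6.f,
                                        7.f, 8.f, 9.f };

    // Scatter-add: output[y*stride + i][x*stride + j] += input[y][x] * kernel[i][j]
    std::vector<float> output(outputSize * outputSize, 0.0f);
    for (unsigned int y = 0; y < inputSize; ++y)
    {
        for (unsigned int x = 0; x < inputSize; ++x)
        {
            for (unsigned int i = 0; i < kernelSize; ++i)
            {
                for (unsigned int j = 0; j < kernelSize; ++j)
                {
                    output[(y * stride + i) * outputSize + (x * stride + j)] +=
                        input[y * inputSize + x] * kernel[i * kernelSize + j];
                }
            }
        }
    }

    // Expected values copied from the unbiased expectedOutputData in the patch.
    const std::vector<float> expected =
    {
        1.f, 3.f, 6.f, 5.f, 3.f,
        5.f, 12.f, 21.f, 16.f, 9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
        7.f, 15.f, 24.f, 17.f, 9.f
    };

    std::cout << (output == expected ? "match" : "mismatch") << std::endl;
    return 0;
}

The centre value 45 is simply the sum of all nine kernel weights (1 + 2 + ... + 9), since every input element contributes there; the biased variants of the test add the single bias value (1.f) to every output element, which is what the std::transform calls in the test implementations do.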