diff options
author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-10-01 18:35:44 +0100 |
---|---|---|
committer | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-10-02 15:00:53 +0100 |
commit | f97f6da835802187da03f597dcc30386c7b9b15b (patch) | |
tree | c936cb6f4c38e19f39bfc935aadff0219d6a38b8 /src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp | |
parent | 680f9911d9d9b369fe321ee2dad014012fb5b20f (diff) | |
download | armnn-f97f6da835802187da03f597dcc30386c7b9b15b.tar.gz |
IVGCVSW-3738 Add end-to-end layer test for DepthToSpace
* Added end-to-end layer test implementation for DepthToSpace
* Added test to reference, CL and NEON backends for all supported
data types and data layouts
* Extracted common data permutation code into new utility file and
refactored some existing tests to reduce code duplication
* Fixed EndToEndLayerTestImpl template to work with Float16 data
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Iaf7a0012c520451052b20c37e36dc05fa8314ff6
Diffstat (limited to 'src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp')
-rw-r--r-- | src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp | 121 |
1 file changed, 121 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp new file mode 100644 index 0000000000..cf4db1d2d4 --- /dev/null +++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp @@ -0,0 +1,121 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <ResolveType.hpp> + +#include <armnn/ArmNN.hpp> + +#include <backendsCommon/test/DataLayoutUtils.hpp> +#include <backendsCommon/test/QuantizeHelper.hpp> + +namespace +{ + +armnn::INetworkPtr CreateDepthToSpaceNetwork(const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputInfo, + const armnn::DepthToSpaceDescriptor& descriptor) +{ + using namespace armnn; + + INetworkPtr network(INetwork::Create()); + + IConnectableLayer* input = network->AddInputLayer(0, "input"); + IConnectableLayer* depthToSpace = network->AddDepthToSpaceLayer(descriptor, "depthToSpace"); + IConnectableLayer* output = network->AddOutputLayer(0, "output"); + + Connect(input, depthToSpace, inputInfo, 0, 0); + Connect(depthToSpace, output, outputInfo, 0, 0); + + return network; +} + +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +void DepthToSpaceEndToEndImpl(const std::vector<armnn::BackendId>& backends, + const DepthToSpaceDescriptor& descriptor, + const armnn::TensorShape& nhwcInputShape, + const armnn::TensorShape& nhwcOutputShape, + const std::vector<float>& floatInputData, + const std::vector<float>& floatExpectedOutputData) +{ + using namespace armnn; + + TensorInfo inputInfo(nhwcInputShape, ArmnnType); + TensorInfo outputInfo(nhwcOutputShape, ArmnnType); + + constexpr float qScale = 0.25f; + constexpr int32_t qOffset = 128; + + // Set quantization parameters for quantized types + if (IsQuantizedType<T>()) + { + inputInfo.SetQuantizationScale(qScale); + inputInfo.SetQuantizationOffset(qOffset); + 
outputInfo.SetQuantizationScale(qScale); + outputInfo.SetQuantizationOffset(qOffset); + } + + std::vector<T> inputData = QuantizedVector<T>(qScale, qOffset, floatInputData); + std::vector<T> expectedOutputData = QuantizedVector<T>(qScale, qOffset, floatExpectedOutputData); + + // Permute tensors from NHWC to NCHW (if needed) + if (descriptor.m_DataLayout == DataLayout::NCHW) + { + PermuteTensorNhwcToNchw(inputInfo, inputData); + PermuteTensorNhwcToNchw(outputInfo, expectedOutputData); + } + + INetworkPtr network = CreateDepthToSpaceNetwork(inputInfo, outputInfo, descriptor); + EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), + { { 0, inputData } }, + { { 0, expectedOutputData } }, + backends); +} + +} // anonymous namespace + +template<armnn::DataType ArmnnType> +void DepthToSpaceEndToEnd(const std::vector<armnn::BackendId>& defaultBackends, + armnn::DataLayout dataLayout) +{ + using namespace armnn; + + TensorShape inputShape = { 2, 2, 2, 4 }; + TensorShape outputShape = { 2, 4, 4, 1 }; + + std::vector<float> inputData = + { + 1.f, 2.f, 3.f, 4.f, + 5.f, 6.f, 7.f, 8.f, + 9.f, 10.f, 11.f, 12.f, + 13.f, 14.f, 15.f, 16.f, + + 17.f, 18.f, 19.f, 20.f, + 21.f, 22.f, 23.f, 24.f, + 25.f, 26.f, 27.f, 28.f, + 29.f, 30.f, 31.f, 32.f + }; + + std::vector<float> expectedOutputData = + { + 1.f, 2.f, 5.f, 6.f, + 3.f, 4.f, 7.f, 8.f, + 9.f, 10.f, 13.f, 14.f, + 11.f, 12.f, 15.f, 16.f, + + 17.f, 18.f, 21.f, 22.f, + 19.f, 20.f, 23.f, 24.f, + 25.f, 26.f, 29.f, 30.f, + 27.f, 28.f, 31.f, 32.f + }; + + DepthToSpaceEndToEndImpl<ArmnnType>(defaultBackends, + DepthToSpaceDescriptor(2, dataLayout), + inputShape, + outputShape, + inputData, + expectedOutputData); +} |