author | Cathal Corbett <cathal.corbett@arm.com> | 2022-04-14 17:55:11 +0100 |
---|---|---|
committer | Cathal Corbett <cathal.corbett@arm.com> | 2022-05-05 16:10:06 +0000 |
commit | 0690265d83e5aa79bd174544a7b35330781619dd (patch) | |
tree | 2cb825017ee202ebcfa9c8428271a4dccaed72a4 /src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp | |
parent | 3a3a6bfaedc64fac3644c8fe88dbfc3947e2b3ab (diff) | |
download | armnn-0690265d83e5aa79bd174544a7b35330781619dd.tar.gz |
IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d
!android-nn-driver:7418
* Updated Front-end and Tools (new API pattern sketched below).
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Changed NNDriver to the new API.
* Updated Ref.
* Neon and Cl backends partially completed (Backend.cpp files).
* Added dynamic or constant input EndToEnd tests.
* Added ConstantTensorAsInputMemberVariableRedirect optimization.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ib18b6c10a093042e165e25237dc04a4c67ba82da
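At the front-end level, this series replaces the layer-factory overloads that took weights and biases at creation time with ConstantLayers wired into the convolution's input slots (0 = data, 1 = weights, 2 = biases), which is exactly what the test helper in the diff below does. A minimal sketch of the pattern follows; the helper name BuildDepthwiseConvNetwork and the commented-out pre-change call are illustrative, not verbatim from this change:

```cpp
#include <armnn/INetwork.hpp>

// Sketch of the constant-tensors-as-input pattern; BuildDepthwiseConvNetwork
// is a hypothetical helper name, not part of this change.
armnn::INetworkPtr BuildDepthwiseConvNetwork(const armnn::DepthwiseConvolution2dDescriptor& desc,
                                             const armnn::TensorInfo& weightsInfo,
                                             const armnn::TensorInfo& biasesInfo,
                                             const armnn::ConstTensor& weights,
                                             const armnn::ConstTensor& biases)
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    // Old style (removed by this series): constants passed to the factory, e.g.
    //   network->AddDepthwiseConvolution2dLayer(desc, weights,
    //       armnn::Optional<armnn::ConstTensor>(biases), "dwConv");

    // New style: constants enter the graph as ConstantLayers feeding the
    // convolution's input slots 1 (weights) and 2 (biases); slot 0 is data.
    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
    armnn::IConnectableLayer* biasLayer    = network->AddConstantLayer(biases, "Bias");
    armnn::IConnectableLayer* conv = network->AddDepthwiseConvolution2dLayer(desc, "dwConv");

    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));

    return network;
}
```

Routing constants through the graph this way lets optimizations such as the redirect pass mentioned above treat constant and dynamic weight inputs uniformly.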
Diffstat (limited to 'src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp')
-rw-r--r-- | src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp | 183 |
1 file changed, 183 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
new file mode 100644
index 0000000000..1f9b60a4f2
--- /dev/null
+++ b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
@@ -0,0 +1,183 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+#include <armnnUtils/QuantizeHelper.hpp>
+
+#include <ResolveType.hpp>
+
+#include <CommonTestUtils.hpp>
+#include <armnnTestUtils/DataLayoutUtils.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+armnn::INetworkPtr CreateDepthwiseConvolution2dNetwork(const armnn::DepthwiseConvolution2dDescriptor& descriptor,
+                                                       const armnn::TensorInfo& inputInfo,
+                                                       const armnn::TensorInfo& weightsInfo,
+                                                       const armnn::TensorInfo& biasInfo,
+                                                       const armnn::TensorInfo& outputInfo,
+                                                       const armnn::ConstTensor& weights,
+                                                       const armnn::ConstTensor& biases)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+    IConnectableLayer* input = network->AddInputLayer(0, "input");
+    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+    armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
+    IConnectableLayer* convolution2d = network->AddDepthwiseConvolution2dLayer(descriptor, "depthwiseConvolution2d");
+    IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+    Connect(input, convolution2d, inputInfo, 0, 0);
+    Connect(weightsLayer, convolution2d, weightsInfo, 0, 1);
+    Connect(biasLayer, convolution2d, biasInfo, 0, 2);
+    Connect(convolution2d, output, outputInfo, 0, 0);
+
+    return network;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
+                                    armnn::DataLayout dataLayout)
+{
+    using namespace armnn;
+    using T = ResolveType<ArmnnType>;
+    using BT = ResolveType<ArmnnBType>;
+
+    const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
+
+    unsigned int depthMultiplier = 2;
+
+    unsigned int inputHeight = 8;
+    unsigned int inputWidth = 16;
+    unsigned int inputChannels = 2;
+    unsigned int inputBatchSize = 1;
+
+    unsigned int kernelHeight = 5;
+    unsigned int kernelWidth = 3;
+
+    unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
+    unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
+    unsigned int outputChannels = inputChannels * depthMultiplier;
+    unsigned int outputBatchSize = inputBatchSize;
+
+    TensorInfo inputInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset, true);
+    TensorInfo outputInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType, qScale, qOffset);
+    TensorInfo weightsInfo({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType, qScale, qOffset, true);
+    TensorInfo biasesInfo({outputChannels}, ArmnnBType, qScale * qScale, 0, true);
+
+    std::vector<float> inputData =
+    {
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+    };
+
+    std::vector<float> weightsData =
+    {
+        1.0f, 1.0f, 1.0f,
+        1.0f, -1.0f, 1.0f,
+        1.0f, 1.0f, 1.0f,
+        1.0f, 1.0f, 1.0f,
+        1.0f, 1.0f, 1.0f,
+
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+
+        0.0f, 0.0f, 0.0f,
+        0.0f, -1.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f
+    };
+
+    std::vector<float> biasesData = { 0.0f, 2.0f, 1.0f, -1.0f };
+
+    std::vector<float> expectedOutputData =
+    {
+        3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f,
+        5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f,
+        5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
+        2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f,
+        4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
+        6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
+        1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+    };
+
+    DepthwiseConvolution2dDescriptor descriptor;
+    descriptor.m_PadLeft = 0;
+    descriptor.m_PadRight = 0;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 0;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 1;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout = dataLayout;
+
+    // Permute input and output if NCHW.
+    if (dataLayout == DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw(inputInfo, inputData);
+        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
+    }
+
+    // Quantize data
+    std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+
+    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
+
+    ConstTensor weights(weightsInfo, qWeightsData);
+    ConstTensor biases(biasesInfo, qBiasesData);
+
+    INetworkPtr network = CreateDepthwiseConvolution2dNetwork(descriptor,
+                                                              inputInfo,
+                                                              weightsInfo,
+                                                              biasesInfo,
+                                                              outputInfo,
+                                                              weights,
+                                                              biases);
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                { { 0, qInputData } },
+                                                { { 0, qExpectedOutputData } },
+                                                backends);
+}
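For context, the templated function above is instantiated from the per-backend end-to-end suites rather than run directly. A hypothetical doctest registration for the reference backend might look as follows; the test name and file placement are illustrative (Arm NN registers these in files such as RefEndToEndTests.cpp):

```cpp
// Hypothetical registration; names are illustrative, not from this commit.
#include <doctest/doctest.h>

#include "DepthwiseConvolution2dEndToEndTests.hpp"

TEST_CASE("RefDepthwiseConvolution2dEndToEndFloat32Test")
{
    // Drive the graph on the reference backend in NHWC; quantized variants
    // reuse the same template with different DataType parameters.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    DepthwiseConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(backends,
                                                                                       armnn::DataLayout::NHWC);
}
```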