From 0690265d83e5aa79bd174544a7b35330781619dd Mon Sep 17 00:00:00 2001
From: Cathal Corbett <cathal.corbett@arm.com>
Date: Thu, 14 Apr 2022 17:55:11 +0100
Subject: IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d

!android-nn-driver:7418

 * Updated Front-end and Tools.
 * Updated Serializer, Deserializer and unit tests to reflect this.
 * Updated TfLiteDelegate, TfLiteParser and OnnxParser.
 * Changed NNDriver to the new API.
 * Updated Ref.
 * Neon and Cl backends partially completed (Backend.cpp files).
 * Added dynamic or constant input EndToEnd tests.
 * Added ConstantTensorAsInputMemberVariableRedirect optimization.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ib18b6c10a093042e165e25237dc04a4c67ba82da
---
 .../test/DepthwiseConvolution2dEndToEndTests.hpp   | 183 +++++++++++++++++++++
 1 file changed, 183 insertions(+)
 create mode 100644 src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp

diff --git a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
new file mode 100644
index 0000000000..1f9b60a4f2
--- /dev/null
+++ b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
@@ -0,0 +1,183 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+#include <armnnUtils/QuantizeHelper.hpp>
+
+#include <ResolveType.hpp>
+
+#include <CommonTestUtils.hpp>
+#include <armnnTestUtils/DataLayoutUtils.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+armnn::INetworkPtr CreateDepthwiseConvolution2dNetwork(const armnn::DepthwiseConvolution2dDescriptor& descriptor,
+                                                       const armnn::TensorInfo& inputInfo,
+                                                       const armnn::TensorInfo& weightsInfo,
+                                                       const armnn::TensorInfo& biasInfo,
+                                                       const armnn::TensorInfo& outputInfo,
+                                                       const armnn::ConstTensor& weights,
+                                                       const armnn::ConstTensor& biases)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+    IConnectableLayer* input         = network->AddInputLayer(0, "input");
+    IConnectableLayer* weightsLayer  = network->AddConstantLayer(weights, "Weights");
+    IConnectableLayer* biasLayer     = network->AddConstantLayer(biases, "Bias");
+    IConnectableLayer* convolution2d = network->AddDepthwiseConvolution2dLayer(descriptor, "depthwiseConvolution2d");
+    IConnectableLayer* output        = network->AddOutputLayer(0, "output");
+
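+    // With ConstTensorsAsInput, the depthwise layer no longer stores weights
+    // and biases as member variables; the ConstantLayers above feed them in
+    // through input slots 1 (weights) and 2 (bias).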
+    Connect(input, convolution2d, inputInfo, 0, 0);
+    Connect(weightsLayer, convolution2d, weightsInfo, 0, 1);
+    Connect(biasLayer, convolution2d, biasInfo, 0, 2);
+    Connect(convolution2d, output, outputInfo, 0, 0);
+
+    return network;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
+                                    armnn::DataLayout dataLayout)
+{
+    using namespace armnn;
+    using T  = ResolveType<ArmnnType>;
+    using BT = ResolveType<ArmnnBType>;
+
+    const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
+
+    unsigned int depthMultiplier = 2;
+
+    unsigned int inputHeight    = 8;
+    unsigned int inputWidth     = 16;
+    unsigned int inputChannels  = 2;
+    unsigned int inputBatchSize = 1;
+
+    unsigned int kernelHeight = 5;
+    unsigned int kernelWidth  = 3;
+
+    unsigned int outputHeight    = inputHeight - kernelHeight + 1 + 2;  // +2 = padTop + padBottom
+    unsigned int outputWidth     = (inputWidth - kernelWidth + 1) / 2;  // /2 = strideX
+    unsigned int outputChannels  = inputChannels * depthMultiplier;
+    unsigned int outputBatchSize = inputBatchSize;
+
+    // Shapes are NHWC; they are permuted below if the test runs in NCHW.
+    TensorInfo inputInfo({ inputBatchSize, inputHeight, inputWidth, inputChannels }, ArmnnType, qScale, qOffset, true);
+    TensorInfo outputInfo({ outputBatchSize, outputHeight, outputWidth, outputChannels }, ArmnnType, qScale, qOffset);
+    TensorInfo weightsInfo({ 1, kernelHeight, kernelWidth, outputChannels }, ArmnnType, qScale, qOffset, true);
+    TensorInfo biasesInfo({ outputChannels }, ArmnnBType, qScale * qScale, 0, true); // bias scale = input * weights scale
+
+    std::vector<float> inputData =
+    {
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+    };
+
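+    // ArmNN stores depthwise weights as [1, H, W, I*M] (here { 1, 5, 3, 4 },
+    // matching weightsInfo), where M is the depth multiplier.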
+    std::vector<float> weightsData =
+    {
+        1.0f,  1.0f, 1.0f,
+        1.0f, -1.0f, 1.0f,
+        1.0f,  1.0f, 1.0f,
+        1.0f,  1.0f, 1.0f,
+        1.0f,  1.0f, 1.0f,
+
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+        2.0f, 2.0f, 2.0f,
+
+        0.0f,  0.0f, 0.0f,
+        0.0f, -1.0f, 0.0f,
+        0.0f,  0.0f, 0.0f,
+        0.0f,  0.0f, 0.0f,
+        0.0f,  0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f,
+        0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f
+    };
+
+    std::vector<float> biasesData = { 0.0f, 2.0f, 1.0f, -1.0f };
+
+    std::vector<float> expectedOutputData =
+    {
+        3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f,
+        5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f,
+        5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
+        2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f,
+        4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
+        6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
+        1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+    };
+
+    DepthwiseConvolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 0;
+    descriptor.m_PadRight    = 0;
+    descriptor.m_PadTop      = 1;
+    descriptor.m_PadBottom   = 1;
+    descriptor.m_StrideX     = 2;
+    descriptor.m_StrideY     = 1;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout  = dataLayout;
+
+    // Permute input and output if NCHW.
+    if (dataLayout == DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw(inputInfo, inputData);
+        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
+    }
+
+    // Quantize data
+    std::vector<T> qInputData           = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData         = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData  = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+
+    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
+
+    ConstTensor weights(weightsInfo, qWeightsData);
+    ConstTensor biases(biasesInfo, qBiasesData);
+
+    INetworkPtr network = CreateDepthwiseConvolution2dNetwork(descriptor,
+                                                              inputInfo,
+                                                              weightsInfo,
+                                                              biasesInfo,
+                                                              outputInfo,
+                                                              weights,
+                                                              biases);
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                { { 0, qInputData } },
+                                                { { 0, qExpectedOutputData } },
+                                                backends);
+}
--
cgit v1.2.1
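
A backend opts into this test by instantiating the template from its
end-to-end test suite. As a minimal sketch, assuming the doctest pattern the
existing backend test suites use (the file, suite and case names here are
illustrative, not part of this patch):

    // Illustrative fragment of a RefEndToEndTests.cpp: run the shared
    // depthwise end-to-end test on the reference backend in both layouts.
    #include <backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp>

    #include <doctest/doctest.h>

    TEST_SUITE("RefEndToEnd")
    {
    TEST_CASE("RefDepthwiseConvolution2dEndToEndFloat32Test")
    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        DepthwiseConvolution2dEndToEnd<armnn::DataType::Float32,
                                       armnn::DataType::Float32>(backends, armnn::DataLayout::NHWC);
        DepthwiseConvolution2dEndToEnd<armnn::DataType::Float32,
                                       armnn::DataType::Float32>(backends, armnn::DataLayout::NCHW);
    }
    }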