From c5fe6e71cd39096af7c2523ec2afe96008c51b0c Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Fri, 25 Nov 2022 16:10:00 +0000
Subject: IVGCVSW-7168 Add Conv2d and Constant support to TOSA Reference
 Backend

* Added TOSA Conv2d and Constant mappings.
* Added unique naming to mappings based on the previous and following
  layers, so they are connected correctly.
* Updated existing mappings to use the new naming convention.
* Added all mappings to one main block in OptimizeSubgraphView.
* Removed isMain from the mapping functions.
* Added Conv2d EndToEnd test.

Signed-off-by: Matthew Sloyan
Change-Id: I27c3e238407c32379ce25a1f01dad11523ef5d2b
---
 .../test/Convolution2dEndToEndTestImpl.hpp         | 134 +++++++++++++++++++++
 1 file changed, 134 insertions(+)
 create mode 100644 src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp

diff --git a/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..bc9a94289b
--- /dev/null
+++ b/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp
@@ -0,0 +1,134 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+#include <armnnUtils/QuantizeHelper.hpp>
+
+#include <ResolveType.hpp>
+
+#include <CommonTestUtils.hpp>
+#include <armnnTestUtils/DataLayoutUtils.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+armnn::INetworkPtr CreateConstConvolution2dNetwork(const armnn::Convolution2dDescriptor& descriptor,
+                                                   const armnn::TensorInfo& inputInfo,
+                                                   const armnn::TensorInfo& weightsInfo,
+                                                   const armnn::TensorInfo& biasInfo,
+                                                   const armnn::TensorInfo& outputInfo,
+                                                   const armnn::ConstTensor& weights,
+                                                   const armnn::ConstTensor& biases,
+                                                   bool biasEnabled)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+    IConnectableLayer* input = network->AddInputLayer(0, "input");
+    IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+    IConnectableLayer* convolution2d = network->AddConvolution2dLayer(descriptor, "convolution2d");
+    IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+    Connect(input, convolution2d, inputInfo, 0, 0);
+    Connect(weightsLayer, convolution2d, weightsInfo, 0, 1);
+
+    if(biasEnabled)
+    {
+        armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
+        Connect(biasLayer, convolution2d, biasInfo, 0, 2);
+    }
+
+    Connect(convolution2d, output, outputInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void Convolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
+                           armnn::DataLayout dataLayout,
+                           bool biasEnabled = true)
+{
+    using namespace armnn;
+
+    const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
+
+    TensorInfo inputInfo({ 1, 5, 5, 1 }, ArmnnType, qScale, qOffset, true);
+    TensorInfo outputInfo({ 1, 3, 3, 1 }, ArmnnType, qScale, qOffset);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, ArmnnType, qScale, qOffset, true);
+    TensorInfo biasesInfo({ 1 }, ArmnnType, qScale * qScale, 0, true);
+
+    std::vector<float> inputData =
+    {
+        1.0f, 5.0f, 2.0f, 3.0f, 5.0f,
+        8.0f, 7.0f, 3.0f, 6.0f, 3.0f,
+        3.0f, 3.0f, 9.0f, 1.0f, 9.0f,
+        4.0f, 1.0f, 8.0f, 1.0f, 3.0f,
+        6.0f, 8.0f, 1.0f, 9.0f, 2.0f
+    };
+
+    std::vector<float> weightsData =
+    {
+        4.0f, 5.0f, 6.0f,
+        0.0f, 0.0f, 0.0f,
+        3.0f, 2.0f, 1.0f
+    };
+
+    std::vector<float> biasesData = { 1.0f };
+
+    float bias = biasEnabled ? biasesData[0] : 0.0f;
+    std::vector<float> expectedOutputData =
+    {
+        65.0f + bias, 76.0f + bias, 91.0f + bias,
+        107.0f + bias, 99.0f + bias, 89.0f + bias,
+        116.0f + bias, 98.0f + bias, 118.0f + bias,
+    };
+
+    Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft = 0;
+    descriptor.m_PadRight = 0;
+    descriptor.m_PadTop = 0;
+    descriptor.m_PadBottom = 0;
+    descriptor.m_StrideX = 1;
+    descriptor.m_StrideY = 1;
+    descriptor.m_BiasEnabled = biasEnabled;
+    descriptor.m_DataLayout = dataLayout;
+
+    if (dataLayout == DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw(inputInfo, inputData);
+        PermuteTensorNhwcToNchw(weightsInfo, weightsData);
+        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
+    }
+
+    // Quantize data
+    std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+    std::vector<T> qBiasesData = armnnUtils::QuantizedVector<T>(biasesData, qScale * qScale, 0);
+
+    ConstTensor weights(weightsInfo, qWeightsData);
+    ConstTensor biases(biasesInfo, qBiasesData);
+
+    INetworkPtr network = CreateConstConvolution2dNetwork(descriptor,
+                                                          inputInfo,
+                                                          weightsInfo,
+                                                          biasesInfo,
+                                                          outputInfo,
+                                                          weights,
+                                                          biases,
+                                                          biasEnabled);
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                {{ 0, qInputData }},
+                                                {{ 0, qExpectedOutputData }},
+                                                backends);
+}
+
+} // anonymous namespace
--
cgit v1.2.1
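
Below is a minimal sketch of how the new Convolution2dEndToEnd helper might be registered in the TOSA Reference backend's EndToEnd test suite, assuming ArmNN's doctest-based unit tests. The file placement, test case name and the tosaDefaultBackends variable are illustrative assumptions, not part of this patch.

    // Hypothetical placement: src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
    #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"

    #include <doctest/doctest.h>

    // Assumed backend id for the TOSA Reference backend.
    std::vector<armnn::BackendId> tosaDefaultBackends = { "TosaRef" };

    TEST_CASE("TosaRefConv2dEndToEndFloat32NhwcTest")
    {
        // biasEnabled defaults to true, so this case also exercises the Constant (bias) mapping.
        Convolution2dEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends, armnn::DataLayout::NHWC);
    }

Running such a case through EndToEndLayerTestImpl builds the constant-weight convolution network above, optimizes and loads it on the given backend, runs inference and compares the result against the precomputed expected output.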