From 00d306e4db5153a4f4d280de4d4cf3e03788fefb Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Wed, 28 Aug 2019 18:08:46 +0100
Subject: IVGCVSW-3381 Break up LayerTests.hpp into more manageable files

Signed-off-by: Aron Virginas-Tar
Change-Id: Icf39434f09fd340ad664cb3b97b8bee6d9da4838
---
 .../test/layerTests/FullyConnectedTestImpl.cpp | 349 +++++++++++++++++++++
 1 file changed, 349 insertions(+)
 create mode 100644 src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp

diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
new file mode 100644
index 0000000000..c84b941e1f
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -0,0 +1,349 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FullyConnectedTestImpl.hpp"
+
+#include <QuantizeHelper.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+//
+// Implementation templates
+//
+
+template<typename T, typename B>
+LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::TensorInfo inputTensorInfo,
+    armnn::TensorInfo outputTensorInfo,
+    armnn::TensorInfo weightsDesc,
+    armnn::TensorInfo biasesDesc,
+    boost::multi_array<T, 2>& weights,
+    boost::multi_array<B, 1>& bias,
+    boost::multi_array<T, 4>& input,
+    bool biasEnabled,
+    bool transposeWeights)
+{
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::FullyConnectedQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> FullyConnectedTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled)
+{
+    constexpr static unsigned int inputWidth = 3u;
+    constexpr static unsigned int inputHeight = 2u;
+    constexpr static unsigned int inputChannels = 1u;
+
+    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+
+    constexpr static unsigned int outputChannels = 2u;
+
+    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+    inputTensorInfo.SetQuantizationOffset(63);
+
+    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(5.f);
+    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
+    weightsDesc.SetQuantizationScale(0.2f);
+    weightsDesc.SetQuantizationOffset(93);
+
+    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
+    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+    biasesDesc.SetQuantizationOffset(0);
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+        {
+            -1.2f, 6.1f, -3.5f,
+            18.8f, -5.5f, 2.9f
+        },
+        inputTensorInfo));
+
+    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
+        {
+            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
+            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
+        },
+        weightsDesc));
+
+    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+
+    result = SimpleFullyConnectedTestImpl<T>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, true
+    );
+
+    if (biasEnabled)
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
+    }
+    else
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
+    }
+
+    return result;
+}
+
+//
+// ArmNN variant of the AndroidNN fully_connected_float_large test.
+//
+// Tests the fully connected layer with large values, optionally transposing weights.
+// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
+//
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool transposeWeights,
+    float qScale = 0.0f,
+    int32_t qOffset = 0)
+{
+    unsigned int inputWidth = 1;
+    unsigned int inputHeight = 1;
+    unsigned int inputChannels = 5;
+    unsigned int inputNum = 1;
+
+    unsigned int outputChannels = 1;
+    unsigned int outputNum = 1;
+
+    // Define the tensor descriptors.
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo weightsDesc;
+    armnn::TensorInfo biasesDesc;
+
+    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[] = { outputNum, outputChannels };
+    unsigned int weightsShape[] = { inputChannels, outputChannels };
+    if (transposeWeights)
+    {
+        std::swap(weightsShape[0], weightsShape[1]);
+    }
+
+    unsigned int biasShape[] = { outputChannels };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
+    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
+    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
+
+    // Set quantization parameters if the requested type is a quantized type.
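+    // Note: only the input and output infos are given an explicit scale/offset here; the
+    // input, weight and expected-output data further down are quantised through
+    // QuantizedVector using the same qScale/qOffset values.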
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
+        })
+    );
+
+    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
+        })
+    );
+
+    std::vector<T> biasValues({900000.f});
+    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
+
+    result = SimpleFullyConnectedTestImpl<T>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        true, transposeWeights
+    );
+
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            965432.0f,
+        })
+    );
+
+    return result;
+}
+
+//
+// Explicit template specializations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
+FullyConnectedTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
+FullyConnectedTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled);
+
+//
+// Implementation functions
+//
+
+LayerTestResult<float, 2> FullyConnectedFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled,
+    bool transposeWeights)
+{
+    unsigned int inputWidth = 1;
+    unsigned int inputHeight = 1;
+    unsigned int inputChannels = 5;
+    unsigned int inputNum = 2;
+
+    unsigned int outputChannels = 3;
+    unsigned int outputNum = 2;
+
+    // Define the tensor descriptors.
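+    // The input is a batch of two 5-element vectors (N=2, C=5, H=W=1) producing a [2, 3]
+    // output; the weights are laid out as [inputChannels, outputChannels] unless
+    // transposeWeights is set, in which case the two dimensions are swapped.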
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo weightsDesc;
+    armnn::TensorInfo biasesDesc;
+
+    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[] = { outputNum, outputChannels };
+    unsigned int weightsShape[] = { inputChannels, outputChannels };
+
+    if (transposeWeights)
+    {
+        std::swap(weightsShape[0], weightsShape[1]);
+    }
+
+    unsigned int biasShape[] = { outputChannels };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
+    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
+    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+
+    LayerTestResult<float, 2> result(outputTensorInfo);
+
+    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
+        {
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+
+            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+        })
+    );
+
+    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
+        {
+            .5f, 2.f, .5f,
+            .5f, 2.f, 1.f,
+            .5f, 2.f, 2.f,
+            .5f, 2.f, 3.f,
+            .5f, 2.f, 4.f
+        }));
+
+    if (transposeWeights)
+    {
+        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
+        {
+            .5f, .5f, .5f, .5f, .5f,
+            2.f, 2.f, 2.f, 2.f, 2.f,
+            .5f, 1.f, 2.f, 3.f, 4.f
+        }));
+    }
+
+
+    std::vector<float> biasValues({0.f, 0.f, 0.f});
+    if (biasEnabled)
+    {
+        biasValues = std::vector<float>({10.f, 20.f, 30.f});
+    }
+    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
+
+    result = SimpleFullyConnectedTestImpl<float>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, transposeWeights
+    );
+
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
+        {
+            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
+            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
+            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
+
+            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
+            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
+            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
+        })
+    );
+
+    return result;
+}
+
+LayerTestResult<float, 2> FullyConnectedLargeTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool transposeWeights)
+{
+    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
+}
--
cgit v1.2.1
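For context on how the split-out implementation is consumed: the functions above are not run directly but are registered by each backend's unit-test suite. The sketch below is illustrative only (the test-case names and argument values are assumptions, and it presumes the ARMNN_AUTO_TEST_CASE helper already used by the backend layer tests); it shows roughly how a backend such as the reference backend would pick these functions up:

    // Hypothetical excerpt from a backend's LayerTests.cpp (names and arguments are illustrative).
    ARMNN_AUTO_TEST_CASE(FullyConnected, FullyConnectedFloat32Test, false, false)            // no bias, no transpose
    ARMNN_AUTO_TEST_CASE(FullyConnectedBiased, FullyConnectedFloat32Test, true, false)       // biasEnabled
    ARMNN_AUTO_TEST_CASE(FullyConnectedTransposed, FullyConnectedFloat32Test, false, true)   // transposeWeights
    ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
    ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
    ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, false)
    ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)

Each macro invocation expands to a Boost.Test case that forwards the trailing arguments (biasEnabled/transposeWeights here) to the test function together with the backend's workload factory and memory manager, and compares result.output against result.outputExpected.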