From 46c09d0094b708c70bb4770693c9e704b1fbfeb1 Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Tue, 28 May 2019 08:15:28 +0100
Subject: IVGCVSW-2970 Support QSymm16 for FullyConnected workloads

* Add support for QSymm16 for FullyConnected
* Add templating to Uint8 RefLayerTest to test QSymm16

Change-Id: Ie6e989daf2ca966d6c6805b8017126eb77ebfec4
Signed-off-by: Francis Murtagh
---
 src/backends/backendsCommon/test/LayerTests.hpp | 145 +++++++++++++++++++++++-
 1 file changed, 144 insertions(+), 1 deletion(-)

diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 9e57ec58a3..f8f50366eb 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -8,9 +8,15 @@
 #include
 #include
+#include "TensorCopyUtils.hpp"
+#include "WorkloadTestUtils.hpp"
+#include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
@@ -793,7 +799,8 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8Test(
     float upperBound,
     float lowerBound);
 
-LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> FullyConnectedTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled);
@@ -1638,3 +1645,139 @@ LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
 LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<typename T, typename B>
+LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::TensorInfo inputTensorInfo,
+    armnn::TensorInfo outputTensorInfo,
+    armnn::TensorInfo weightsDesc,
+    armnn::TensorInfo biasesDesc,
+    boost::multi_array<T, 2>& weights,
+    boost::multi_array<B, 1>& bias,
+    boost::multi_array<T, 4>& input,
+    bool biasEnabled,
+    bool transposeWeights)
+{
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::FullyConnectedQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> ConvertToDataType(const std::vector<float>& input,
+                                 const armnn::TensorInfo& inputTensorInfo)
+{
+    std::vector<T> output(input.size());
+    auto outputTensorInfo = inputTensorInfo;
+    outputTensorInfo.SetDataType(ArmnnType);
+
+    std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
+    armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+
+    for (auto it = input.begin(); it != input.end(); ++it)
+    {
+        rOutputEncoder.Set(*it);
+        ++rOutputEncoder;
+    }
+    return output;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> FullyConnectedTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled)
+{
+    constexpr static unsigned int inputWidth = 3u;
+    constexpr static unsigned int inputHeight = 2u;
+    constexpr static unsigned int inputChannels = 1u;
+
+    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+
+    constexpr static unsigned int outputChannels = 2u;
+
+    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+    inputTensorInfo.SetQuantizationOffset(63);
+
+    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(5.f);
+    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
+    weightsDesc.SetQuantizationScale(0.2f);
+    weightsDesc.SetQuantizationOffset(93);
+
+    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
+    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+    biasesDesc.SetQuantizationOffset(0);
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+        {
+            -1.2f, 6.1f, -3.5f,
+            18.8f, -5.5f, 2.9f
+        },
+        inputTensorInfo));
+
+    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
+        {
+            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
+            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
+        },
+        weightsDesc));
+
+    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+
+    result = SimpleFullyConnectedTestImpl<T>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, true
+    );
+
+    if (biasEnabled)
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
+    }
+    else
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
+    }
+
+    return result;
+}
\ No newline at end of file
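
The patch works by templating the test body on armnn::DataType, with T resolved through armnn::ResolveType, so one body drives both the Uint8 and QSymm16 paths. As a minimal sketch of how a backend test suite might instantiate it (not part of this diff; ARMNN_AUTO_TEST_CASE and the enumerator spellings are assumed from ArmNN of this era, and the test-case names are illustrative):

    // Illustrative registrations only -- not taken from the patch above.
    // The trailing boolean is forwarded to FullyConnectedTest's biasEnabled.
    ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8,
                         FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)
    ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16,
                         FullyConnectedTest<armnn::DataType::QuantisedSymm16>, true)

Note that biasesDesc deliberately stays int32 via GetBiasTypeFromWeightsType: with the bias scale fixed to inputScale * weightsScale, the quantized bias can be added straight into the int32 accumulator, the standard convention for quantized inference.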
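On the magic numbers: the raw bias vector {9250, 67500} is the quantized form of float biases 185.0f and 1350.0f. biasesDesc uses offset 0 and scale inputScale * weightsScale = 0.1f * 0.2f = 0.02f, so a value quantizes to round(v / scale). A minimal standalone check of that arithmetic (plain C++, no ArmNN dependency; the helper name is hypothetical):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Affine quantization as the test's bias tensor uses it: zero offset,
    // so a value quantizes to round(value / scale).
    std::int32_t QuantizeBias(float value, float biasScale)
    {
        return static_cast<std::int32_t>(std::lround(value / biasScale));
    }

    int main()
    {
        const float biasScale = 0.1f * 0.2f; // inputScale * weightsScale = 0.02
        assert(QuantizeBias(185.0f,  biasScale) == 9250);
        assert(QuantizeBias(1350.0f, biasScale) == 67500);
        return 0;
    }

The expected outputs line up the same way: the float accumulations come out near -106.3 and 110.9 (about 78.7 and 1460.9 once the biases are added), and after requantizing with the output scale of 5 they land on the same stored values as the reference floats {-107.04f, 110.f} and {80.f, 1460.f}.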