From dba634fd6a66a9e033a1925b0b26c80b270bbf21 Mon Sep 17 00:00:00 2001
From: Matthew Jackson
Date: Thu, 15 Aug 2019 15:14:18 +0100
Subject: IVGCVSW-3639 Add 5d tensor support

* Increased MaxNumOfTensorDimensions and fixed issues related to its use
* Fixed issues caused by assuming 5d tensors are invalid
* Updated ArmComputeTensorUtils for 5d tensors
* Added 5d tensor unit tests for add, mul, stack and reshape
  (needed by IVGCVSW-3527)

Signed-off-by: Matthew Jackson
Change-Id: I5bcd64942d0d04efcc6c5acb240ad4b88e010743
---
 src/backends/backendsCommon/test/LayerTests.hpp | 364 +++++++++++++++++-------
 1 file changed, 262 insertions(+), 102 deletions(-)

(limited to 'src/backends/backendsCommon/test/LayerTests.hpp')

diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 0fe5d09cd4..df33aa192e 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -539,6 +539,11 @@ LayerTestResult<T, 4> SimpleReshapeTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 5> Reshape5dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 4> SimpleFloorTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -712,6 +717,10 @@ LayerTestResult<float, 4> AdditionTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<float, 5> Addition5dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -764,6 +773,10 @@ LayerTestResult<float, 4> MultiplicationTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<float, 5> Multiplication5dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2084,6 +2097,37 @@ LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> ConvertToDataType(const std::vector<float>& input,
+                                 const armnn::TensorInfo& inputTensorInfo)
+{
+    std::vector<T> output(input.size());
+    auto outputTensorInfo = inputTensorInfo;
+    outputTensorInfo.SetDataType(ArmnnType);
+
+    std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
+    armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+
+    for (auto it = input.begin(); it != input.end(); ++it)
+    {
+        rOutputEncoder.Set(*it);
+        ++rOutputEncoder;
+    }
+    return output;
+}
+
+// Utility method to convert a single value to the correct type
+template <typename T>
+T ConvertToDataType(const float& value,
+                    const armnn::TensorInfo& tensorInfo)
+{
+    std::vector<T> output(1);
+    std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+    armnn::Encoder<float>& rEncoder = *pEncoder;
+    rEncoder.Set(value);
+    return output[0];
+}
+
 template<typename T>
 LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
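Illustration only, not part of the patch: the ConvertToDataType helper added in
the hunk above drives an armnn::Encoder<float> to convert float test data into
whatever data type a test is instantiated for, applying the TensorInfo's
quantization parameters on the way. A minimal usage sketch, assuming the
19.08-era QuantisedAsymm8 type name (later renamed QAsymmU8):

    // Quantise four floats to uint8_t; the QAsymm8 encoder computes
    // round(value / scale) + offset, clamped to [0, 255].
    armnn::TensorInfo info({ 1, 1, 1, 4 }, armnn::DataType::QuantisedAsymm8);
    info.SetQuantizationScale(0.1f);
    info.SetQuantizationOffset(63);

    std::vector<uint8_t> quantised =
        ConvertToDataType<armnn::DataType::QuantisedAsymm8>({ -1.2f, 6.1f, -3.5f, 2.9f }, info);

Routing all conversions through the Encoder keeps the test data generation
consistent with how the reference backend itself reads and writes tensors.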
@@ -2130,35 +2174,75 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
     return result;
 }
 
-template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-std::vector<T> ConvertToDataType(const std::vector<float>& input,
-                                 const armnn::TensorInfo& inputTensorInfo)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> FullyConnectedTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled)
 {
-    std::vector<T> output(input.size());
-    auto outputTensorInfo = inputTensorInfo;
-    outputTensorInfo.SetDataType(ArmnnType);
+    constexpr static unsigned int inputWidth = 3u;
+    constexpr static unsigned int inputHeight = 2u;
+    constexpr static unsigned int inputChannels = 1u;
 
-    std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
-    armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
 
-    for (auto it = input.begin(); it != input.end(); ++it)
+    constexpr static unsigned int outputChannels = 2u;
+
+    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+    inputTensorInfo.SetQuantizationOffset(63);
+
+    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(5.f);
+    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
+    weightsDesc.SetQuantizationScale(0.2f);
+    weightsDesc.SetQuantizationOffset(93);
+
+    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
+    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+    biasesDesc.SetQuantizationOffset(0);
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+        {
+            -1.2f, 6.1f, -3.5f,
+            18.8f, -5.5f, 2.9f
+        },
+        inputTensorInfo));
+
+    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
+        {
+            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
+            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
+        },
+        weightsDesc));
+
+    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+
+    result = SimpleFullyConnectedTestImpl<T>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, true
+    );
+
+    if (biasEnabled)
     {
-        rOutputEncoder.Set(*it);
-        ++rOutputEncoder;
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
+    }
+    else
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
     }
-    return output;
-}
-
-// Utility method to convert a single value to the correct type
-template <typename T>
-T ConvertToDataType(const float& value,
-                    const armnn::TensorInfo& tensorInfo)
-{
-    std::vector<T> output(1);
-    std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
-    armnn::Encoder<float>& rEncoder = *pEncoder;
-    rEncoder.Set(value);
-    return output[0];
+
+    return result;
 }
 
 template<typename T>
@@ -2354,8 +2438,8 @@ LayerTestResult<T, 2> RsqrtNegativeTest(
         inputValues,
         expectedOutputValues);
 }
 
-template<typename T>
-LayerTestResult<T, 4> SimpleReshapeTestImpl(
+template<typename T, size_t NumDims>
+LayerTestResult<T, NumDims> SimpleReshapeTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::TensorInfo inputTensorInfo,
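Also illustrative, not from the patch: the hunk above re-templates
SimpleReshapeTestImpl on the tensor rank (NumDims), so one helper can serve
both the existing 4d reshape test and the new 5d one. A sketch of how callers
would instantiate it; the info*/data* names are hypothetical stand-ins for
real test fixtures:

    auto ret4d = SimpleReshapeTestImpl<float, 4>(workloadFactory, memoryManager,
                                                 info4dIn, info4dOut, data4dIn, data4dExpected);
    auto ret5d = SimpleReshapeTestImpl<float, 5>(workloadFactory, memoryManager,
                                                 info5dIn, info5dOut, data5dIn, data5dExpected);

Templating on rank avoids duplicating the workload-creation plumbing once per
dimensionality.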
@@ -2363,10 +2447,10 @@ LayerTestResult<T, 4> SimpleReshapeTestImpl(
     const std::vector<T>& inputData,
     const std::vector<T>& outputExpectedData)
 {
-    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+    auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
 
-    LayerTestResult<T, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+    LayerTestResult<T, NumDims> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, outputExpectedData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -2381,86 +2465,15 @@ LayerTestResult<T, 4> SimpleReshapeTestImpl(
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.origin());
 
     workload->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
 
     return ret;
 }
 
-template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> FullyConnectedTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool biasEnabled)
-{
-    constexpr static unsigned int inputWidth = 3u;
-    constexpr static unsigned int inputHeight = 2u;
-    constexpr static unsigned int inputChannels = 1u;
-
-    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
-
-    constexpr static unsigned int outputChannels = 2u;
-
-    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-    inputTensorInfo.SetQuantizationOffset(63);
-
-    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
-    outputTensorInfo.SetQuantizationScale(5.f);
-    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
-
-    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
-    weightsDesc.SetQuantizationScale(0.2f);
-    weightsDesc.SetQuantizationOffset(93);
-
-    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
-    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
-    biasesDesc.SetQuantizationOffset(0);
-
-    LayerTestResult<T, 2> result(outputTensorInfo);
-
-    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
-        {
-            -1.2f, 6.1f, -3.5f,
-            18.8f, -5.5f, 2.9f
-        },
-        inputTensorInfo));
-
-    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
-        {
-            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
-            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
-        },
-        weightsDesc));
-
-    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
-
-    result = SimpleFullyConnectedTestImpl<T>(
-        workloadFactory,
-        memoryManager,
-        inputTensorInfo, outputTensorInfo,
-        weightsDesc, biasesDesc,
-        weights, bias, input,
-        biasEnabled, true
-    );
-
-    if (biasEnabled)
-    {
-        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
-                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
-    }
-    else
-    {
-        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
-                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
-    }
-
-    return result;
-}
-
 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> SimpleReshapeTest(
     armnn::IWorkloadFactory& workloadFactory,
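A brief aside, not part of the patch: the Reshape5dTest added in the next hunk
reshapes { 2, 2, 8, 1, 1 } into { 2, 2, 2, 2, 2 }, and its expected output
simply regroups the input values because a reshape preserves the flattened
element order. The one invariant the shapes must satisfy is equal volume,
which a compile-time check makes explicit:

    // Sanity check of the shapes used by Reshape5dTest below: a reshape may
    // only change the shape, never the element count (32 == 32 here).
    static_assert(2u * 2u * 8u * 1u * 1u == 2u * 2u * 2u * 2u * 2u,
                  "Reshape5dTest input and output shapes must have equal volume");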
@@ -2509,7 +2522,69 @@ LayerTestResult<T, 4> SimpleReshapeTest(
         },
         outputTensorInfo);
 
-    return SimpleReshapeTestImpl<T>(
+    return SimpleReshapeTestImpl<T, 4>(
+        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
+}
+
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 5> Reshape5dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 2, 2, 8, 1, 1 };
+    unsigned int outputShape[] = { 2, 2, 2, 2, 2 };
+
+    inputTensorInfo = armnn::TensorInfo(5, inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(1.0f);
+    outputTensorInfo = armnn::TensorInfo(5, outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(1.0f);
+
+    auto input = ConvertToDataType<ArmnnType>(
+        {
+            0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f,
+            8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+
+            16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f,
+            24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f,
+        },
+        inputTensorInfo);
+
+    auto outputExpected = ConvertToDataType<ArmnnType>(
+        {
+            0.0f, 1.0f,
+            2.0f, 3.0f,
+
+            4.0f, 5.0f,
+            6.0f, 7.0f,
+
+
+            8.0f, 9.0f,
+            10.0f, 11.0f,
+
+            12.0f, 13.0f,
+            14.0f, 15.0f,
+
+
+
+            16.0f, 17.0f,
+            18.0f, 19.0f,
+
+            20.0f, 21.0f,
+            22.0f, 23.0f,
+
+
+            24.0f, 25.0f,
+            26.0f, 27.0f,
+
+            28.0f, 29.0f,
+            30.0f, 31.0f,
+        },
+        outputTensorInfo);
+
+    return SimpleReshapeTestImpl<T, 5>(
         workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
 }
 
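For context, not part of the patch: the final hunk adds Stack5dOutputTest,
which stacks two { 2, 2, 2, 3 } inputs along axis 1. Stacking N inputs of rank
R along axis a yields rank R + 1, inserting a new dimension of size N at
position a, hence the { 2, 2, 2, 2, 3 } output and the interleaving of the two
inputs' blocks in the expected data. StackedShape below is a hypothetical
helper, written against the public armnn::TensorShape API, that spells out
that rule:

    #include <armnn/Tensor.hpp>
    #include <vector>

    // Hypothetical helper: the shape produced by stacking numInputs tensors
    // of shape 'in' along 'axis' (0 <= axis <= in.GetNumDimensions()).
    armnn::TensorShape StackedShape(const armnn::TensorShape& in,
                                    unsigned int axis,
                                    unsigned int numInputs)
    {
        std::vector<unsigned int> dims;
        for (unsigned int i = 0; i < in.GetNumDimensions(); ++i)
        {
            if (i == axis)
            {
                dims.push_back(numInputs); // the new, stacked dimension
            }
            dims.push_back(in[i]);
        }
        if (axis == in.GetNumDimensions())
        {
            dims.push_back(numInputs); // stacking after the last dimension
        }
        return armnn::TensorShape(static_cast<unsigned int>(dims.size()), dims.data());
    }

    // StackedShape({ 2, 2, 2, 3 }, 1, 2) -> { 2, 2, 2, 2, 3 }, matching the test.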
@@ -4746,3 +4821,88 @@ LayerTestResult<T, 3> Stack3dOutput1Axis3InputTest(
         outputExpectedData
     );
 }
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 5> Stack5dOutputTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo ({ 2, 2, 2, 3 }, ArmnnType);
+    armnn::TensorInfo outputTensorInfo({ 2, 2, 2, 2, 3 }, ArmnnType);
+
+    std::vector<std::vector<T>> inputData;
+
+    inputData.push_back(
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        7, 8, 9,
+        10, 11, 12,
+
+
+        13, 14, 15,
+        16, 17, 18,
+
+        19, 20, 21,
+        22, 23, 24
+    });
+
+    inputData.push_back(
+    {
+        25, 26, 27,
+        28, 29, 30,
+
+        31, 32, 33,
+        34, 35, 36,
+
+
+        37, 38, 39,
+        40, 41, 42,
+
+        43, 44, 45,
+        46, 47, 48
+    });
+
+    std::vector<T> outputExpectedData =
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        7, 8, 9,
+        10, 11, 12,
+
+
+        25, 26, 27,
+        28, 29, 30,
+
+        31, 32, 33,
+        34, 35, 36,
+
+
+
+        13, 14, 15,
+        16, 17, 18,
+
+        19, 20, 21,
+        22, 23, 24,
+
+
+        37, 38, 39,
+        40, 41, 42,
+
+        43, 44, 45,
+        46, 47, 48
+
+    };
+
+    return StackTestHelper<ArmnnType, T, 5>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo,
+        outputTensorInfo,
+        1U,
+        inputData,
+        outputExpectedData
+    );
+}
--
cgit v1.2.1