From d41b25938323455ea6b6d5348cab8861971b5fba Mon Sep 17 00:00:00 2001
From: Nina Drozd
Date: Mon, 19 Nov 2018 13:03:36 +0000
Subject: IVGCVSW-2144: Adding TensorUtils class

* helper methods for creating TensorShape and TensorInfo objects

Change-Id: I371fc7aea08ca6bbb9c205a143ce36e8353a1c48
---
 .../backendsCommon/test/Conv2dTestImpl.hpp      | 34 ++++++++++------
 src/backends/backendsCommon/test/LayerTests.cpp | 47 +++++++---------------
 .../backendsCommon/test/Pooling2dTestImpl.hpp   | 21 +++++-----
 3 files changed, 47 insertions(+), 55 deletions(-)

(limited to 'src/backends/backendsCommon')

diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
index 6685a8edd2..d137c8082a 100755
--- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
 
 #include 
 #include 
@@ -108,10 +109,12 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
 
     // Note these tensors will use two (identical) batches.
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(
-        2*outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo inputTensorInfo =
+        armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo =
+        armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc =
+        armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -354,9 +357,12 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
     BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
     // Creates the tensors.
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo inputTensorInfo =
+        armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo =
+        armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc =
+        armnnUtils::GetTensorInfo(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -483,9 +489,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
     unsigned int outputChannels = kernelChannels;
     unsigned int outputNum = inputNum;
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo(1, outputChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo inputTensorInfo =
+        armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo =
+        armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo(1, outputChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -629,11 +637,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
     unsigned int outputChannels = inputChannels * depthMultiplier;
     unsigned int outputBatchSize = inputBatchSize;
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(
+    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
         inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
         outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo(
+    armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo(
         depthMultiplier, inputChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
 
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index ecd09ca024..f10d14e942 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -4,6 +4,7 @@
 //
 #include "LayerTests.hpp"
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
 
 #include "test/TensorHelpers.hpp"
 #include "TensorCopyUtils.hpp"
@@ -68,24 +69,6 @@ static std::vector<float> ConvInput3x8x16({
 // 2-channel bias used by a number of Conv2d tests.
 static std::vector<float> Bias2({0, 2});
 
-armnn::TensorShape GetTestTensorShape(unsigned int numberOfBatches,
-                                      unsigned int numberOfChannels,
-                                      unsigned int height,
-                                      unsigned int width,
-                                      const armnn::DataLayoutIndexed& dataLayout)
-{
-    switch (dataLayout.GetDataLayout())
-    {
-        case armnn::DataLayout::NCHW:
-            return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
-        case armnn::DataLayout::NHWC:
-            return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
-        default:
-            throw armnn::InvalidArgumentException("unknown data layout ["
-                                                  + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
-    }
-}
-
 // Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
 template<typename T>
 boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
@@ -3859,8 +3842,8 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout);
 
     std::vector<float> inputData({
         1.0f, 2.0f, 3.0f, 4.0f,
@@ -3913,8 +3896,8 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 1, 1, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout);
 
     std::vector<float> inputData({
         1.0f, 255.0f,
@@ -3979,8 +3962,8 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout);
 
     std::vector<float> inputData({
         1.0f, 2.0f, 3.0f, 4.0f,
@@ -4045,8 +4028,8 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 3, 5, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 3, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout);
 
     std::vector<float> inputData({
         1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
@@ -4109,8 +4092,8 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 3, 2, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 3, 5, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout);
 
     std::vector<float> inputData({
         1.0f, 2.0f,
@@ -4741,7 +4724,7 @@ LayerTestResult<float, 4> L2Normalization1dTest(
     unsigned int width = 1;
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
         numberOfBatches, numberOfChannels, height, width, layout);
 
     std::vector<float> inputValues
     {
@@ -4810,7 +4793,7 @@ LayerTestResult<float, 4> L2Normalization2dTest(
     unsigned int height = 1;
     unsigned int width = 5;
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
         numberOfBatches, numberOfChannels,
         height, width, layout);
 
     std::vector<float> inputValues
     {
@@ -4855,7 +4838,7 @@ LayerTestResult<float, 4> L2Normalization3dTest(
     unsigned int height = 4;
     unsigned int width = 3;
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
         numberOfBatches, numberOfChannels, height, width, layout);
 
     std::vector<float> inputValues
     {
@@ -4920,7 +4903,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(
     unsigned int height = 4;
     unsigned int width = 3;
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
         numberOfBatches, numberOfChannels, height, width, layout);
 
     std::vector<float> inputValues
     {
diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
index 9050fc64a6..0f33ac01a5 100644
--- a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
 
 #include "QuantizeHelper.hpp"
 
@@ -50,10 +51,10 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
     unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
     unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(inputBatchSize, inputChannels, inputHeight,
-                                                      inputWidth, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(outputBatchSize, outputChannels, outputHeight,
-                                                       outputWidth, dataLayout);
+    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(inputBatchSize, inputChannels, inputHeight,
+                                                                  inputWidth, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(outputBatchSize, outputChannels, outputHeight,
+                                                                   outputWidth, dataLayout);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -252,8 +253,8 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
     descriptor.m_DataLayout = dataLayout;
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout);
+    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -321,8 +322,8 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
     descriptor.m_DataLayout = dataLayout;
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout);
+    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -441,8 +442,8 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
     descriptor.m_DataLayout = dataLayout;
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout);
+    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout);
 
     std::vector<T> inputData(
         QuantizedVector<T>(qScale, qOffset, {
--
cgit v1.2.1
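
Note on the new helpers: the TensorUtils implementation itself lives outside src/backends/backendsCommon, so this filtered diff never shows it. The sketch below is a reconstruction, not the committed code. The armnnUtils namespace, the five-argument call shape, and the NCHW/NHWC switch are taken from the removed GetTestTensorShape helper and the updated call sites above; the header layout, the plain armnn::DataLayout parameter (the deleted helper took a DataLayoutIndexed), and the templated GetTensorInfo overload mapping the element type via armnn::GetDataType<T>() are assumptions.

    // TensorUtils.hpp -- hypothetical reconstruction, not part of this diff.
    // The switch mirrors the GetTestTensorShape helper deleted above; the
    // parameter types and the template are assumptions.
    #pragma once

    #include <armnn/Tensor.hpp>
    #include <armnn/TypesUtils.hpp>
    #include <armnn/Exceptions.hpp>

    #include <string>

    namespace armnnUtils
    {

    // Builds a 4-D shape in NCHW or NHWC order from batch/channel/height/width.
    inline armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
                                             unsigned int numberOfChannels,
                                             unsigned int height,
                                             unsigned int width,
                                             armnn::DataLayout dataLayout)
    {
        switch (dataLayout)
        {
            case armnn::DataLayout::NCHW:
                return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
            case armnn::DataLayout::NHWC:
                return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
            default:
                throw armnn::InvalidArgumentException("unknown data layout ["
                    + std::to_string(static_cast<int>(dataLayout)) + "]");
        }
    }

    // Wraps the layout-dependent shape in a TensorInfo. The call sites in this
    // patch pass five arguments, so the element type is assumed to be supplied
    // as a template argument and mapped via armnn::GetDataType<T>(), the idiom
    // the diff itself uses for the bias TensorInfo (armnn::GetDataType<B>()).
    template<typename T>
    armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
                                    unsigned int numberOfChannels,
                                    unsigned int height,
                                    unsigned int width,
                                    armnn::DataLayout dataLayout)
    {
        const armnn::TensorShape shape =
            GetTensorShape(numberOfBatches, numberOfChannels, height, width, dataLayout);
        return armnn::TensorInfo(shape, armnn::GetDataType<T>());
    }

    } // namespace armnnUtils

A single shared copy of the layout-dependent shape logic is the point of the change: after this patch, Conv2dTestImpl.hpp, LayerTests.cpp, and Pooling2dTestImpl.hpp all call the same armnnUtils helpers instead of local GetTensorInfo/GetTestTensorShape functions, so supporting a new data layout (or changing the error message) touches one file instead of three.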