diff options
author | Francis Murtagh <francis.murtagh@arm.com> | 2018-10-04 16:03:07 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:58 +0100 |
commit | d59116ecb54c5bfe828d82ea0bc3367bc9b8c5dd (patch) | |
tree | 782d777544416eca8dcf8b924b710a395a75ad36 /src/backends/test/Conv2dTestImpl.hpp | |
parent | de9011bc446d767932b6fec356f65791dff685e5 (diff) | |
download | armnn-d59116ecb54c5bfe828d82ea0bc3367bc9b8c5dd.tar.gz |
IVGCVSW-1889 - Unit test Convolution2d with NHWC
* Added simple convolution Unit test
* Set the data layout correctly in workloads
Change-Id: Ie71b8415f6abc392a84900fc4438b7416fbb558a
Diffstat (limited to 'src/backends/test/Conv2dTestImpl.hpp')
-rw-r--r-- | src/backends/test/Conv2dTestImpl.hpp | 91 |
1 file changed, 91 insertions, 0 deletions
diff --git a/src/backends/test/Conv2dTestImpl.hpp b/src/backends/test/Conv2dTestImpl.hpp index c593c7ba26..8e29615c47 100644 --- a/src/backends/test/Conv2dTestImpl.hpp +++ b/src/backends/test/Conv2dTestImpl.hpp @@ -194,6 +194,97 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(armnn::IWorkloadFactory& workl } template<typename T, typename B> +LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory, + const boost::multi_array<T, 4>& input, + const boost::multi_array<T, 4>& kernel, + const boost::multi_array<B, 1>& bias, + const boost::multi_array<T, 4>& outputExpected, + armnn::DataLayout dataLayout, + float qScale, + int32_t qOffset, + uint32_t padLeft = 1, + uint32_t padTop = 1, + uint32_t padRight = 1, + uint32_t padBottom = 1, + uint32_t strideX = 1, + uint32_t strideY = 1) +{ + unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]); + unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]); + unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]); + unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[2]); + + unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]); + unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]); + unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[1]); + unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[2]); + + unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]); + unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]); + unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]); + unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]); + + bool biasEnabled = bias.size() > 0; + + // Creates the tensors. 
+ armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, + armnn::GetDataType<T>()); + armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>()); + armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>()); + + // Construct the input data. + std::vector<T> inputData; + inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels); + auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData); + + // Construct the output data, with bias applied, as appropriate. + std::vector<T> outputData; + outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels); + + LayerTestResult<T, 4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc); + AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + + armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + + armnn::Convolution2dQueueDescriptor data; + + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs. 
+ data.m_Parameters.m_StrideX = strideX; + data.m_Parameters.m_StrideY = strideY; + data.m_Parameters.m_PadLeft = padLeft; + data.m_Parameters.m_PadRight = padRight; + data.m_Parameters.m_PadTop = padTop; + data.m_Parameters.m_PadBottom = padBottom; + data.m_Parameters.m_BiasEnabled = biasEnabled; + data.m_Parameters.m_DataLayout = dataLayout; + + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info); + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]); + + workloadFactory.Finalize(); + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +template<typename T, typename B> LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(armnn::IWorkloadFactory& workloadFactory, const boost::multi_array<T, 4>& input, const boost::multi_array<T, 4>& kernel, |