//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "WorkloadTestUtils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <Permute.hpp>

#include <test/TensorHelpers.hpp>

// Shared implementation: builds the input and expected tensors, creates a SpaceToBatchNd
// workload through the given factory, executes it and returns the actual and expected outputs.
template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::TensorInfo& inputTensorInfo,
    armnn::TensorInfo& outputTensorInfo,
    std::vector<float>& inputData,
    std::vector<float>& outputExpectedData,
    armnn::SpaceToBatchNdQueueDescriptor descriptor,
    const float qScale = 1.0f,
    const int32_t qOffset = 0)
{
    const armnn::PermutationVector NCHWToNHWC = {0, 3, 1, 2};

    // The reference values are given in NCHW order; permute both the tensor infos and the
    // data when the test runs with the NHWC data layout.
    if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
    {
        inputTensorInfo = armnnUtils::Permuted(inputTensorInfo, NCHWToNHWC);
        outputTensorInfo = armnnUtils::Permuted(outputTensorInfo, NCHWToNHWC);

        std::vector<float> inputTmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), inputTmp.data());
        inputData = inputTmp;

        std::vector<float> outputTmp(outputExpectedData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputExpectedData.data(), outputTmp.data());
        outputExpectedData = outputTmp;
    }

    // Quantization parameters are only meaningful for quantized data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
                                                      QuantizedVector<T>(qScale, qOffset, inputData));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                          QuantizedVector<T>(qScale, qOffset, outputExpectedData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSpaceToBatchNd(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
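// The test cases below exercise SpaceToBatchNd, which (after applying the zero padding given
// in m_PadList) divides each spatial dimension by the corresponding m_BlockShape entry and
// moves the resulting blocks into the batch dimension. For example, with block shape {2, 2} a
// 1x1x4x4 input becomes a 4x1x2x2 output, where output batch n gathers the input elements at
// positions with (height % 2, width % 2) == (n / 2, n % 2); see SpaceToBatchNdMultiBlockTest.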
// 1x1x2x2 input, block shape {2, 2}, no padding: each spatial element moves to its own batch.
template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {1, 1, 2, 2};
    unsigned int outputShape[] = {4, 1, 1, 1};

    armnn::SpaceToBatchNdQueueDescriptor desc;
    desc.m_Parameters.m_DataLayout = dataLayout;
    desc.m_Parameters.m_BlockShape = {2, 2};
    desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    std::vector<float> input = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> outputExpected = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f
    });

    return SpaceToBatchNdTestImpl<T>(
        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}

// As above, but with three channels to check that the channel values stay with their batch.
template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {1, 3, 2, 2};
    unsigned int outputShape[] = {4, 3, 1, 1};

    armnn::SpaceToBatchNdQueueDescriptor desc;
    desc.m_Parameters.m_DataLayout = dataLayout;
    desc.m_Parameters.m_BlockShape = {2, 2};
    desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    std::vector<float> input = std::vector<float>(
    {
        1.0f, 4.0f, 7.0f, 10.0f,
        2.0f, 5.0f, 8.0f, 11.0f,
        3.0f, 6.0f, 9.0f, 12.0f
    });

    std::vector<float> outputExpected = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f
    });

    return SpaceToBatchNdTestImpl<T>(
        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}

// 1x1x4x4 input, block shape {2, 2}: each output batch gathers one position from every
// 2x2 block (e.g. batch 0 holds the top-left elements 1, 3, 9, 11).
template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdMultiBlockTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {1, 1, 4, 4};
    unsigned int outputShape[] = {4, 1, 2, 2};

    armnn::SpaceToBatchNdQueueDescriptor desc;
    desc.m_Parameters.m_DataLayout = dataLayout;
    desc.m_Parameters.m_BlockShape = {2, 2};
    desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    std::vector<float> input = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<float> outputExpected = std::vector<float>(
    {
        1.0f, 3.0f, 9.0f, 11.0f,
        2.0f, 4.0f, 10.0f, 12.0f,
        5.0f, 7.0f, 13.0f, 15.0f,
        6.0f, 8.0f, 14.0f, 16.0f
    });

    return SpaceToBatchNdTestImpl<T>(
        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}

// 2x1x2x4 input with a width padding of {2, 0}: the padded zeros appear in the expected output.
template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {2, 1, 2, 4};
    unsigned int outputShape[] = {8, 1, 1, 3};

    armnn::SpaceToBatchNdQueueDescriptor desc;
    desc.m_Parameters.m_DataLayout = dataLayout;
    desc.m_Parameters.m_BlockShape = {2, 2};
    desc.m_Parameters.m_PadList = {{0, 0}, {2, 0}};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    std::vector<float> input = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<float> outputExpected = std::vector<float>(
    {
        0.0f, 1.0f, 3.0f,
        0.0f, 9.0f, 11.0f,
        0.0f, 2.0f, 4.0f,
        0.0f, 10.0f, 12.0f,
        0.0f, 5.0f, 7.0f,
        0.0f, 13.0f, 15.0f,
        0.0f, 6.0f, 8.0f,
        0.0f, 14.0f, 16.0f
    });

    return SpaceToBatchNdTestImpl<T>(
        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}

// NHWC variants of the tests above.
template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdSimpleNHWCTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsNHWCTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}

template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdMultiBlockNHWCTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}

template<typename T>
LayerTestResult<T, 4> SpaceToBatchNdPaddingNHWCTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
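// Example usage (a minimal sketch, not part of the original header): running the simple
// Float32 case against the reference backend and comparing the actual output with the
// expected output. armnn::RefWorkloadFactory, CompareTensors and BOOST_TEST are assumed to
// be available as in the Arm NN backend unit tests; exact names may differ between versions.
//
//     armnn::RefWorkloadFactory workloadFactory;
//     LayerTestResult<float, 4> result =
//         SpaceToBatchNdSimpleTest<float>(workloadFactory, nullptr);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));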