//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TransposeConvolution2dTestImpl.hpp"

#include <armnnUtils/QuantizeHelper.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <armnnTestUtils/DataLayoutUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/TensorHelpers.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace
{

template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;

template<typename T>
void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
{
    if (data.first.GetNumElements() > data.second.size())
    {
        throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
            std::to_string(data.first.GetNumElements()) + " but got " + std::to_string(data.second.size()));
    }
}

template<typename T, typename BT>
void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                    const armnn::ITensorHandleFactory& tensorHandleFactory,
                                    const armnn::TransposeConvolution2dDescriptor& descriptor,
                                    const TensorData<T>& input,
                                    TensorData<T>& output,
                                    const TensorData<T>& weights,
                                    const armnn::Optional<TensorData<BT>>& biases)
{
    IgnoreUnused(memoryManager);
    using namespace armnn;

    VerifyInputTensorData(input, "input");
    VerifyInputTensorData(weights, "weights");

    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            throw InvalidArgumentException("Bias enabled but no bias data provided");
        }
        VerifyInputTensorData(biases.value(), "biases");
    }

    // set up weights
    ScopedTensorHandle weightsTensor(weights.first);

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightsTensor;

    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());

    std::unique_ptr<ScopedTensorHandle> biasesTensor;
    if (descriptor.m_BiasEnabled)
    {
        // set up biases
        biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
        queueDescriptor.m_Bias = biasesTensor.get();

        AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
    }

    // set up input and output handles
    std::unique_ptr<ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(input.first);
    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(output.first);

    // set up workload
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateWorkload(armnn::LayerType::TransposeConvolution2d, queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.second.data());

    ExecuteWorkload(*workload, memoryManager);

    // copy output
    output.second = std::vector<T>(output.first.GetNumElements(), T());
    CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
}

template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    // set up quantization parameters
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float   qScale  = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
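
        // The remaining tensor infos reuse the input scale and offset; the bias scale is set to
        // inputScale * weightScale (0.5f * 0.5f = 0.25f) with a zero offset, the usual convention
        // for quantized bias data.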
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        armnnUtils::QuantizedVector<T>(weightsData,
                                       weightsInfo.GetQuantizationScale(),
                                       weightsInfo.GetQuantizationOffset())
    };

    // set up biases
    using BT = armnn::ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            armnnUtils::QuantizedVector<BT>(biasesData,
                                            biasesInfo.GetQuantizationScale(),
                                            biasesInfo.GetQuantizationOffset())
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   tensorHandleFactory,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.m_ActualData   = output.second;
    testResult.m_ExpectedData = armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                               outputInfo.GetQuantizationScale(),
                                                               outputInfo.GetQuantizationOffset());

    return testResult;
}

template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,
                 std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,
                 std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo,
                 std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc(inputInfo, inputData);
    PermuteTensorNchwToNhwc(outputInfo, outputData);
    PermuteTensorNchwToNhwc(weightsInfo, weightsData);
}

} // anonymous namespace

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
         1.f,  3.f,  6.f,  5.f,  3.f,
         5.f, 12.f, 21.f, 16.f,  9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
         7.f, 15.f, 24.f, 17.f,  9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
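        // The reference data above is authored in NCHW; for NHWC runs only the tensor layouts are
        // permuted, the values themselves are unchanged. The 5x5 output size follows from the
        // transpose convolution size formula outputSize = (inputSize - 1) * stride + kernelSize,
        // i.e. (3 - 1) * 1 + 3 = 5 in each spatial dimension here.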
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 4u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 2u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 3.f, 2.f, 1.f,
        1.f, 3.f, 3.f, 1.f,
        2.f, 1.f, 1.f, 3.f,
        3.f, 2.f, 3.f, 3.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        0.f, 1.f, 0.f,
        2.f, 1.f, 2.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        21.f, 21.f,
        28.f, 27.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft     = 2;
    descriptor.m_PadRight    = 2;
    descriptor.m_PadTop      = 2;
    descriptor.m_PadBottom   = 2;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 7u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f,
        2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        1.f,  2.f,  4.f,  2.f,  4.f,  2.f,  3.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        7.f,  8.f, 16.f,  8.f, 16.f,  8.f,  9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    TensorShape inputShape  = { 1, 1, 2, 2 };
    TensorShape outputShape = { 1, 2, 5, 5 };

    // OIHW for NCHW; OHWI for NHWC
    TensorShape weightsShape = { 2, 1, 3, 3 };
    TensorShape biasesShape  = { 2 };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo(biasesShape, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 2.f,
        3.f, 4.f,
    };

    std::vector<float> weightsData =
    {
         1.f,  3.f,  5.f,
         7.f,  9.f, 11.f,
        13.f, 15.f, 17.f,

         2.f,  4.f,  6.f,
         8.f, 10.f, 12.f,
        14.f, 16.f, 18.f
    };

    std::vector<float> biasesData = { -1.5f, -2.0f };

    std::vector<float> expectedOutputData =
    {
        -0.5f,  1.5f,   5.5f,  4.5f,  8.5f,
         5.5f,  7.5f,  23.5f, 16.5f, 20.5f,
        14.5f, 22.5f,  60.5f, 40.5f, 52.5f,
        19.5f, 25.5f,  59.5f, 34.5f, 42.5f,
        37.5f, 43.5f, 101.5f, 58.5f, 66.5f,

         0.0f,  2.0f,   8.0f,  6.0f, 10.0f,
         6.0f,  8.0f,  26.0f, 18.0f, 22.0f,
        18.0f, 26.0f,  70.0f, 46.0f, 58.0f,
        22.0f, 28.0f,  66.0f, 38.0f, 46.0f,
        40.0f, 46.0f, 108.0f, 62.0f, 70.0f
    };

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int
        quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
         4, 12, 20,
        28, 36, 44,
        52, 60, 68,

         4,  8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());

    std::vector<uint8_t> expectedOutputData =
    {
         9,  13,  21,  19,  27,
        21,  25,  57,  43,  51,
        39,  55, 131,  91, 115,
        49,  61, 129,  79,  95,
        85,  97, 213, 127, 143,

        10,  14,  26,  22,  30,
        22,  26,  62,  46,  54,
        46,  62, 150, 102, 126,
        54,  66, 142,  86, 102,
        90, 102, 226, 134, 150
    };

    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    std::unique_ptr<ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);

    WorkloadInfo workloadInfo;
    ScopedTensorHandle weightTensor(kernelInfo);
    ScopedTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightTensor;
    queueDescriptor.m_Bias       = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload =
        workloadFactory.CreateWorkload(armnn::LayerType::TransposeConvolution2d, queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<uint8_t, 4>(actualOutput,
                                       expectedOutputData,
                                       outputHandle->GetShape(),
                                       outputInfo.GetShape());
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&
        workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);
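
// For reference, a minimal sketch of how a backend test case typically drives one of the
// instantiations above (assumptions: the reference backend is built, and RefWorkloadFactory,
// RefTensorHandleFactory and RefMemoryManager are available with the constructors shown; exact
// helper names and signatures can differ between ArmNN releases):
//
//     auto memoryManager = std::make_shared<armnn::RefMemoryManager>();
//     armnn::RefWorkloadFactory     workloadFactory(memoryManager);
//     armnn::RefTensorHandleFactory tensorHandleFactory(memoryManager);
//
//     LayerTestResult<float, 4> result =
//         SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
//             workloadFactory, memoryManager, tensorHandleFactory,
//             /*biasEnabled=*/true, armnn::DataLayout::NCHW);
//
//     // result.m_ActualData can then be compared against result.m_ExpectedData.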