// // Copyright © 2019 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "InstanceNormalizationTestImpl.hpp" #include #include #include #include #include #include #include #include #include namespace { template> LayerTestResult InstanceNormTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, const armnn::TensorInfo& inputTensorInfo, const armnn::TensorInfo& outputTensorInfo, const std::vector& inputValues, const std::vector& expectedOutputValues, armnn::InstanceNormalizationQueueDescriptor descriptor, float qScale = 0.0f, int32_t qOffset = 0) { IgnoreUnused(memoryManager); std::vector inputTensor = armnnUtils::QuantizedVector(inputValues, qScale, qOffset); std::vector expectedOutput = armnnUtils::QuantizedVector(expectedOutputValues, qScale, qOffset); std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); armnn::WorkloadInfo info; AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); std::unique_ptr workload = workloadFactory.CreateWorkload(armnn::LayerType::InstanceNormalization, descriptor, info); inputHandle->Allocate(); outputHandle->Allocate(); CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); workload->Execute(); CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); return LayerTestResult(actualOutput, expectedOutput, outputHandle->GetShape(), outputTensorInfo.GetShape()); } template> LayerTestResult InstanceNormTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, 
armnn::DataLayout dataLayout) { // BatchSize: 2 // Height: 2 // Width: 2 // Channels: 2 const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 }; armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType); armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType); std::vector inputValues { // Batch 0, Height 0, Width 0 x Channel (2) 0.f, 1.f, // Batch 0, Height 0, Width 1 x Channel (2) 0.f, 2.f, // Batch 0, Height 1, Width 0 x Channel (2) 0.f, 2.f, // Batch 0, Height 1, Width 1 x Channel (2) 0.f, 4.f, // Batch 1, Height 0, Width 0 x Channel (2) 1.f, -1.f, // Batch 1, Height 0, Width 1 x Channel (2) -1.f, 2.f, // Batch 1, Height 1, Width 0 x Channel (2) -1.f, -2.f, // Batch 1, Height 1, Width 1 x Channel (2) 1.f, 4.f }; std::vector expectedOutputValues { // Batch 0, Height 0, Width 0 x Channel (2) 0.f, -1.1470304f, // Batch 0, Height 0, Width 1 x Channel (2) 0.f, -0.22940612f, // Batch 0, Height 1, Width 0 x Channel (2) 0.f, -0.22940612f, // Batch 0, Height 1, Width 1 x Channel (2) 0.f, 1.6058424f, // Batch 1, Height 0, Width 0 x Channel (2) 0.99995005f, -0.7337929f, // Batch 1, Height 0, Width 1 x Channel (2) -0.99995005f, 0.52413774f, // Batch 1, Height 1, Width 0 x Channel (2) -0.99995005f, -1.1531031f, // Batch 1, Height 1, Width 1 x Channel (2) 0.99995005f, 1.3627582f }; if (dataLayout == armnn::DataLayout::NCHW) { PermuteTensorNhwcToNchw(inputTensorInfo, inputValues); PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues); } armnn::InstanceNormalizationQueueDescriptor descriptor; descriptor.m_Parameters.m_Eps = 0.0001f; descriptor.m_Parameters.m_Beta = 0.0f; descriptor.m_Parameters.m_Gamma = 1.0f; descriptor.m_Parameters.m_DataLayout = dataLayout; return InstanceNormTestImpl( workloadFactory, memoryManager, tensorHandleFactory, inputTensorInfo, outputTensorInfo, inputValues, expectedOutputValues, descriptor); } template> LayerTestResult InstanceNormTest2( armnn::IWorkloadFactory& workloadFactory, const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, armnn::DataLayout dataLayout) { // BatchSize: 2 // Height: 2 // Width: 2 // Channels: 2 const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 }; armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType); armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType); std::vector inputValues { // Batch 0, Height 0, Width 0 x Channel (2) 0.f, 1.f, // Batch 0, Height 0, Width 1 x Channel (2) 0.f, 2.f, // Batch 0, Height 1, Width 0 x Channel (2) 0.f, 2.f, // Batch 0, Height 1, Width 1 x Channel (2) 0.f, 4.f, // Batch 1, Height 0, Width 0 x Channel (2) 1.f, -1.f, // Batch 1, Height 0, Width 1 x Channel (2) -1.f, 2.f, // Batch 1, Height 1, Width 0 x Channel (2) -1.f, -2.f, // Batch 1, Height 1, Width 1 x Channel (2) 1.f, 4.f }; std::vector expectedOutputValues { // Batch 0, Height 0, Width 0 x Channel (2) 10.f, 7.7059393f, // Batch 0, Height 0, Width 1 x Channel (2) 10.f, 9.541187f, // Batch 0, Height 1, Width 0 x Channel (2) 10.f, 9.541187f, // Batch 0, Height 1, Width 1 x Channel (2) 10.f, 13.211685f, // Batch 1, Height 0, Width 0 x Channel (2) 11.9999f, 8.532414f, // Batch 1, Height 0, Width 1 x Channel (2) 8.0001f, 11.048275f, // Batch 1, Height 1, Width 0 x Channel (2) 8.0001f, 7.693794f, // Batch 1, Height 1, Width 1 x Channel (2) 11.9999f, 12.725516f }; if (dataLayout == armnn::DataLayout::NCHW) { PermuteTensorNhwcToNchw(inputTensorInfo, inputValues); PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues); } armnn::InstanceNormalizationQueueDescriptor descriptor; descriptor.m_Parameters.m_Eps = 0.0001f; descriptor.m_Parameters.m_Beta = 10.0f; descriptor.m_Parameters.m_Gamma = 2.0f; descriptor.m_Parameters.m_DataLayout = dataLayout; return InstanceNormTestImpl( workloadFactory, memoryManager, tensorHandleFactory, inputTensorInfo, outputTensorInfo, inputValues, expectedOutputValues, descriptor); } } // anonymous namespace 
// Public entry points dispatched by the backend unit tests. The extraction had
// stripped the LayerTestResult element types and the InstanceNormTest template
// arguments; they are restored here (Float32 -> float, Float16 -> armnn::Half).

/// Identity-affine instance normalization, float32.
LayerTestResult<float, 4> InstanceNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

/// Identity-affine instance normalization, float16.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float16>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

/// Instance normalization with Gamma = 2, Beta = 10, float32.
LayerTestResult<float, 4> InstanceNormFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

/// Instance normalization with Gamma = 2, Beta = 10, float16.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float16>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}