From c4223d135b1637ba05141d5375a22a182a03d4b3 Mon Sep 17 00:00:00 2001
From: Ellen Norris-Thompson
Date: Mon, 10 Jun 2019 14:15:12 +0100
Subject: IVGCVSW-3217 Refactor the Layer tests for ResizeBilinear to make
 them generic

* Refactored the ResizeBilinear tests so they can be used for both
  Float32 and Uint8.
* Moved the implementations to the .hpp file and renamed the tests
  accordingly.

Signed-off-by: Ellen Norris-Thompson
Change-Id: Icf79b0616db0c307cfcf94747fe0a6d4343588bd
---
 src/backends/backendsCommon/test/LayerTests.cpp | 623 ------------------------
 src/backends/backendsCommon/test/LayerTests.hpp | 606 +++++++++++++++++++++--
 src/backends/cl/test/ClLayerTests.cpp           |  30 +-
 src/backends/neon/test/NeonLayerTests.cpp       |  68 ++-
 src/backends/reference/test/RefLayerTests.cpp   |  75 ++-
 5 files changed, 697 insertions(+), 705 deletions(-)
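Note on the pattern this patch adopts: each layer test becomes a function template over armnn::DataType, with the C++ element type recovered through armnn::ResolveType and the quantised/float branches selected by armnn::IsQuantizedType. The sketch below is a minimal, self-contained illustration of that idea; the type names mirror the ones used in the diff, but the bodies are stand-ins, not ArmNN's implementations.

#include <cstdint>
#include <type_traits>

enum class DataType { Float32, QuantisedAsymm8 };  // reduced stand-in for armnn::DataType

// Stand-in for armnn::ResolveType: maps the enum tag to a C++ element type.
template <DataType DT> struct ResolveTypeTraits;
template <> struct ResolveTypeTraits<DataType::Float32>         { using Type = float;   };
template <> struct ResolveTypeTraits<DataType::QuantisedAsymm8> { using Type = uint8_t; };
template <DataType DT> using ResolveType = typename ResolveTypeTraits<DT>::Type;

// Stand-in for armnn::IsQuantizedType: lets a single test body branch per type.
template <typename T> constexpr bool IsQuantizedType() { return !std::is_floating_point<T>::value; }

// Shape of the refactored tests: one template, instantiated once per data type.
template <DataType ArmnnType, typename T = ResolveType<ArmnnType>>
T FirstElementOfTestData()
{
    return IsQuantizedType<T>() ? T(1) : T(1.0f);
}

int main()
{
    float   f = FirstElementOfTestData<DataType::Float32>();          // T deduced as float
    uint8_t q = FirstElementOfTestData<DataType::QuantisedAsymm8>();  // T deduced as uint8_t
    return int(f) - int(q);                                           // 0
}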
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index fb07f9fe0b..af426a470b 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5224,342 +5224,6 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim3Test(
         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
 }
 
-LayerTestResult<float, 4> ResizeBilinearNopTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f,
-
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = input;
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> SimpleResizeBilinearTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-        1.0f, 255.0f,
-        200.0f, 250.0f,
-
-        250.0f, 200.0f,
-        250.0f, 1.0f
-    });
-
-    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
-    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
-    // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
-    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
-    // which we would expect if projecting the centre).
-
-    std::vector<float> outputData({
-        1.0f,
-
-        250.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f,
-
-        7.0f, 6.0f, 5.0f, 4.0f,
-        6.0f, 5.0f, 4.0f, 3.0f,
-        5.0f, 4.0f, 3.0f, 2.0f,
-        4.0f, 3.0f, 2.0f, 1.0f
-    });
-
-    std::vector<float> outputData({
-        1.0f, 3.0f,
-        3.0f, 5.0f,
-
-        7.0f, 5.0f,
-        5.0f, 3.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-          1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
-         13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
-        144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
-
-        987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
-         89.0f,  55.0f,  34.0f,  21.0f,  13.0f,
-          8.0f,   5.0f,   3.0f,   2.0f,   1.0f
-    });
-
-    std::vector<float> outputData({
-          1.0f,   2.6666f,   6.00f,
-         78.5f, 179.3333f, 401.00f,
-
-        987.0f, 454.6670f, 203.33f,
-         48.5f,  22.3333f,  10.00f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
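Cross-check of ResizeBilinearMinTest's expected values above: with the top-left-corner convention the comments describe, every output coordinate maps to inputCoord = outputCoord * (inputDim / outputDim). The helper below is a local sketch written for this note, assuming that scale convention with edge clamping; it is not the ArmNN reference implementation.

#include <algorithm>
#include <cstdio>
#include <vector>

// Bilinear sampling with srcPos = dstPos * (srcDim / dstDim), clamping the
// second tap to the image edge.
std::vector<float> ResizeBilinearSketch(const std::vector<float>& src,
                                        unsigned srcH, unsigned srcW,
                                        unsigned dstH, unsigned dstW)
{
    std::vector<float> dst(dstH * dstW);
    const float sy = float(srcH) / float(dstH);
    const float sx = float(srcW) / float(dstW);
    for (unsigned y = 0; y < dstH; ++y)
    {
        for (unsigned x = 0; x < dstW; ++x)
        {
            const float fy = y * sy, fx = x * sx;
            const unsigned y0 = unsigned(fy), x0 = unsigned(fx);
            const unsigned y1 = std::min(y0 + 1, srcH - 1), x1 = std::min(x0 + 1, srcW - 1);
            const float wy = fy - y0, wx = fx - x0;
            const float top = src[y0 * srcW + x0] * (1 - wx) + src[y0 * srcW + x1] * wx;
            const float bot = src[y1 * srcW + x0] * (1 - wx) + src[y1 * srcW + x1] * wx;
            dst[y * dstW + x] = top * (1 - wy) + bot * wy;
        }
    }
    return dst;
}

int main()
{
    // First channel of the Min test above: 3x5 -> 2x3.
    std::vector<float> in = {   1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
                               13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
                              144.0f, 233.0f, 377.0f, 610.0f, 987.0f };
    for (float v : ResizeBilinearSketch(in, 3, 5, 2, 3))
    {
        std::printf("%.4f ", v);  // 1.0000 2.6667 6.0000 78.5000 179.3333 401.0000
    }
}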
-LayerTestResult<float, 4> ResizeBilinearMagTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-          1.0f,   2.0f,
-         13.0f,  21.0f,
-        144.0f, 233.0f,
-
-        233.0f, 144.0f,
-         21.0f,  13.0f,
-          2.0f,   1.0f
-    });
-
-    std::vector<float> outputData({
-          1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
-         13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
-        144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
-
-        233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
-         21.0f,  17.8f,  14.6f,  13.0f,  13.0f,
-          2.0f,   1.6f,   1.2f,   1.0f,   1.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
 LayerTestResult<float, 2> FakeQuantizationTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -7634,293 +7298,6 @@ LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
         shape0, output, 1.0f, 0);
 }
 
-LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth;
-    constexpr unsigned int outputHeight = inputHeight;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                      armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(1.5f);
-    inputTensorInfo.SetQuantizationOffset(-3);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                       armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(1.5f);
-    outputTensorInfo.SetQuantizationOffset(-3);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1, 2, 3, 4,
-        2, 3, 4, 5,
-        3, 4, 5, 6,
-        4, 5, 6, 7
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = input;
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 2;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                      armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(0.1567f);
-    inputTensorInfo.SetQuantizationOffset(1);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                       armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(0.1567f);
-    outputTensorInfo.SetQuantizationOffset(1);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1, 255,
-        200, 250
-    }));
-
-    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
-    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
-    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
-    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
-    // the centre).
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-        1
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                      armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(3.141592f);
-    inputTensorInfo.SetQuantizationOffset(3);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                       armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(3.141592f);
-    outputTensorInfo.SetQuantizationOffset(3);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1, 2, 3, 4,
-        2, 3, 4, 5,
-        3, 4, 5, 6,
-        4, 5, 6, 7
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-        1, 3,
-        3, 5
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 3;
-    constexpr unsigned int inputHeight = 2;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = 2;
-    constexpr unsigned int outputHeight = 1;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                      armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(1.5f);
-    inputTensorInfo.SetQuantizationOffset(-1);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                       armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(1.5f);
-    outputTensorInfo.SetQuantizationOffset(-1);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1, 2, 3, // 3.0, 4.5, 6.0
-        5, 8, 13 // 9.0, 13.5, 21.0
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-        1, 3 // 3.0, 5.25
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
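The dequantised values in the inline comments of ResizeBilinearMinUint8Test above follow from the standard asymmetric-quantisation mapping real = scale * (q - offset). A quick self-contained check of those numbers:

#include <cstdint>
#include <cstdio>

// real = scale * (q - offset): the usual asymmetric-quantisation mapping.
float Dequantize(uint8_t q, float scale, int offset)
{
    return scale * (float(q) - float(offset));
}

int main()
{
    const float scale  = 1.5f;  // quantisation parameters of the MinUint8 test
    const int   offset = -1;
    const uint8_t qs[] = { 1, 2, 3, 5, 8, 13 };
    for (uint8_t q : qs)
    {
        // 1 -> 3.0, 2 -> 4.5, 3 -> 6.0, 5 -> 9.0, 8 -> 13.5, 13 -> 21.0
        std::printf("%u -> %.1f\n", unsigned(q), Dequantize(q, scale, offset));
    }
}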
-LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 3;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = 5;
-    constexpr unsigned int outputHeight = 3;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                      armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(0.010765f);
-    inputTensorInfo.SetQuantizationOffset(7);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                       armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(0.010132f);
-    outputTensorInfo.SetQuantizationOffset(-18);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        24, 228, // 0.183005, 2.379065,
-        105, 128, // 1.05497, 1.302565
-        230, 71 // 2.400595, 0.68896
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
-        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
-        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
 LayerTestResult<float, 4> BatchNormTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
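The templated replacements in LayerTests.hpp below feed their float reference data through a QuantizedVector<T> helper before building the input tensor. Roughly, such a helper quantises the data when T is a quantised integer type and passes it through unchanged when T is float; the sketch below illustrates that assumed behaviour and is not ArmNN's implementation.

#include <cmath>
#include <cstdint>
#include <type_traits>
#include <vector>

// Illustrative QuantizedVector: quantise with q = round(v / scale) + offset
// when T is an integer type; pass float data through untouched otherwise.
template <typename T>
std::vector<T> QuantizedVectorSketch(float scale, int32_t offset, const std::vector<float>& values)
{
    std::vector<T> out;
    out.reserve(values.size());
    for (float v : values)
    {
        if (std::is_floating_point<T>::value)
        {
            out.push_back(static_cast<T>(v));                            // float path: no-op
        }
        else
        {
            out.push_back(static_cast<T>(std::lround(v / scale) + offset)); // quantised path
        }
    }
    return out;
}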
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 19105855e1..8bbd0d47c8 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -10,6 +10,8 @@
 #include
 #include "TensorCopyUtils.hpp"
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
+#include "Permute.hpp"
 #include
 #include
@@ -843,36 +845,41 @@ LayerTestResult<int16_t, 4> TanhInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-// Tests that the output should be identical to the input when the output dimensions match the input ones.
-LayerTestResult<float, 4> ResizeBilinearNopTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+/// Tests that the output should be identical to the input when the output dimensions match the input ones.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearNopTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
 
 // Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image.
-LayerTestResult<float, 4> SimpleResizeBilinearTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleResizeBilinearTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for minification of a square input matrix (also: input dimensions are a
 // multiple of output dimensions).
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearSqMinTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for minification (output dimensions smaller than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearMinTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMagTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearMagTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Rsqrt2dTestCommon(
@@ -1162,26 +1169,6 @@ LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
 LayerTestResult<uint8_t, 4> BatchNormUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2328,3 +2315,540 @@ LayerTestResult<T, 4> SimpleFloorTest(
 
     return ret;
 }
+
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearNopTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.5f);
+        inputTensorInfo.SetQuantizationOffset(-3);
+        outputTensorInfo.SetQuantizationScale(1.5f);
+        outputTensorInfo.SetQuantizationOffset(-3);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                1, 2, 3, 4,
+                2, 3, 4, 5,
+                3, 4, 5, 6,
+                4, 5, 6, 7
+            }
+        : std::initializer_list<float>
+            {
+                1.0f, 2.0f, 3.0f, 4.0f,
+                2.0f, 3.0f, 4.0f, 5.0f,
+                3.0f, 4.0f, 5.0f, 6.0f,
+                4.0f, 5.0f, 6.0f, 7.0f,
+
+                1.0f, 2.0f, 3.0f, 4.0f,
+                2.0f, 3.0f, 4.0f, 5.0f,
+                3.0f, 4.0f, 5.0f, 6.0f,
+                4.0f, 5.0f, 6.0f, 7.0f
+            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = input;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearNopTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleResizeBilinearTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.1567f);
+        inputTensorInfo.SetQuantizationOffset(1);
+        outputTensorInfo.SetQuantizationScale(0.1567f);
+        outputTensorInfo.SetQuantizationOffset(1);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                1, 255,
+                200, 250
+            }
+        : std::initializer_list<float>
+            {
+                1.0f, 255.0f,
+                200.0f, 250.0f,
+
+                250.0f, 200.0f,
+                250.0f, 1.0f
+            };
+
+    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+    // which we would expect if projecting the centre).
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                1
+            }
+        : std::initializer_list<float>
+            {
+                1.0f,
+
+                250.0f
+            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleResizeBilinearTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
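To make the top-left-corner comment above concrete: for a 2x2 to 1x1 resize the only output texel maps back to source position (0 * 2, 0 * 2) = (0, 0), so the top-left input value is returned untouched, whereas centre projection would sample at (0.5, 0.5) and average all four taps. A tiny self-contained illustration:

#include <cstdio>

int main()
{
    const float src[2][2] = { { 1.0f, 255.0f }, { 200.0f, 250.0f } };
    // Top-left projection: srcX = dstX * (2 / 1) = 0, srcY = 0 -> src[0][0].
    const float topLeft = src[0][0];
    // Centre projection would instead average all four taps:
    const float centre = (src[0][0] + src[0][1] + src[1][0] + src[1][1]) / 4.0f;
    std::printf("top-left: %.1f, centre: %.1f\n", topLeft, centre);  // 1.0 vs 176.5
}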
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearSqMinTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(3.141592f);
+        inputTensorInfo.SetQuantizationOffset(3);
+        outputTensorInfo.SetQuantizationScale(3.141592f);
+        outputTensorInfo.SetQuantizationOffset(3);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                1, 2, 3, 4,
+                2, 3, 4, 5,
+                3, 4, 5, 6,
+                4, 5, 6, 7
+            }
+        : std::initializer_list<float>
+            {
+                1.0f, 2.0f, 3.0f, 4.0f,
+                2.0f, 3.0f, 4.0f, 5.0f,
+                3.0f, 4.0f, 5.0f, 6.0f,
+                4.0f, 5.0f, 6.0f, 7.0f,
+
+                7.0f, 6.0f, 5.0f, 4.0f,
+                6.0f, 5.0f, 4.0f, 3.0f,
+                5.0f, 4.0f, 3.0f, 2.0f,
+                4.0f, 3.0f, 2.0f, 1.0f
+            };
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                1, 3,
+                3, 5
+            }
+        : std::initializer_list<float>
+            {
+                1.0f, 3.0f,
+                3.0f, 5.0f,
+
+                7.0f, 5.0f,
+                5.0f, 3.0f
+            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearMinTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.5f);
+        inputTensorInfo.SetQuantizationOffset(-1);
+        outputTensorInfo.SetQuantizationScale(1.5f);
+        outputTensorInfo.SetQuantizationOffset(-1);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                3.0f, 4.5f, 6.0f, // 1, 2, 3, : Expected quantised values
+                9.0f, 13.5f, 21.0f // 5, 8, 13
+            }
+        : std::initializer_list<float>
+            {
+                  1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
+                 13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
+                144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
+
+                987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
+                 89.0f,  55.0f,  34.0f,  21.0f,  13.0f,
+                  8.0f,   5.0f,   3.0f,   2.0f,   1.0f
+            };
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                3.0f, 5.25f // 1, 3
+            }
+        : std::initializer_list<float>
+            {
+                  1.0f,   2.6666f,   6.00f,
+                 78.5f, 179.3333f, 401.00f,
+
+                987.0f, 454.6670f, 203.33f,
+                 48.5f,  22.3333f,  10.00f
+            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearMinTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearMagTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+        ? armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
+        : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.010765f);
+        inputTensorInfo.SetQuantizationOffset(7);
+        outputTensorInfo.SetQuantizationScale(0.010132f);
+        outputTensorInfo.SetQuantizationOffset(-18);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                0.183005f, 2.379065f, // 24, 228, : Expected quantised values
+                1.05497f, 1.302565f, // 105, 128,
+                2.400595f, 0.68896f // 230, 71
+            }
+        : std::initializer_list<float>
+            {
+                  1.0f,   2.0f,
+                 13.0f,  21.0f,
+                144.0f, 233.0f,
+
+                233.0f, 144.0f,
+                 21.0f,  13.0f,
+                  2.0f,   1.0f
+            };
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+        ? std::initializer_list<float>
+            {
+                0.18300501f, 1.06142902f, 1.93985295f, 2.37906504f, 2.37906504f,
+                1.05497003f, 1.15400803f, 1.25304604f, 1.30256498f, 1.30256498f,
+                2.40059495f, 1.71594095f, 1.03128707f, 0.68896002f, 0.68896002f
+                // 0, 87, 173, 217, 217, : Expected quantised values
+                // 86, 96, 106, 111, 111,
+                // 219, 151, 84, 50, 50
+            }
+        : std::initializer_list<float>
+            {
+                  1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
+                 13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
+                144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
+
+                233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
+                 21.0f,  17.8f,  14.6f,  13.0f,  13.0f,
+                  2.0f,   1.6f,   1.2f,   1.0f,   1.0f
+            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearMagTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout dataLayout);
\ No newline at end of file
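The Mag (magnification) expected values in the implementation above can also be checked by hand, assuming the same srcPos = dstPos * (srcDim / dstDim) convention as the earlier sketch; clamping the second tap at the right edge is what makes the last two output columns repeat the edge value. A self-contained check of the first row:

#include <algorithm>
#include <cstdio>

int main()
{
    const float row[2] = { 1.0f, 2.0f };  // first row of the float Mag test input
    const float scale  = 2.0f / 5.0f;     // srcW / dstW
    for (int x = 0; x < 5; ++x)
    {
        const float fx = x * scale;
        const int   x0 = int(fx);
        const int   x1 = std::min(x0 + 1, 1);  // clamp to the right edge (srcW - 1)
        const float v  = row[x0] + (fx - x0) * (row[x1] - row[x0]);
        std::printf("%.1f ", v);               // 1.0 1.4 1.8 2.0 2.0
    }
}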
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index b92e88ef1a..fee980c8a0 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -245,18 +245,28 @@ ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dTest, armnn::DataLa
 ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, armnn::DataLayout::NHWC)
 
 // Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
 
 // Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
 
 // Constant
 ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index f842892864..4e719d2df8 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -471,24 +471,60 @@ ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
 
 // Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
-
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
 
 // Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
 
 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
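The reference backend below now registers the quantised Mag test in both layouts. Its expected values embed a requantisation step, because the input and output tensors use different quantisation parameters: dequantise with the input's scale/offset, then requantise with the output's. A self-contained check of the first value, assuming round-to-nearest requantisation:

#include <cmath>
#include <cstdio>

// real = inScale * (q - inOffset);  q' = round(real / outScale) + outOffset.
int main()
{
    const float inScale  = 0.010765f; const int inOffset  = 7;    // MagUint8 input params
    const float outScale = 0.010132f; const int outOffset = -18;  // MagUint8 output params

    const int   q       = 24;                                  // first MagUint8 input value
    const float real    = inScale * float(q - inOffset);       // 0.183005
    const int   requant = int(std::lround(real / outScale)) + outOffset;
    std::printf("%d -> %.6f -> %d\n", q, real, requant);       // 24 -> 0.183005 -> 0
}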
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index efdd1efb82..fd01550186 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -401,23 +401,68 @@ ARMNN_AUTO_TEST_CASE(BatchNormInt16, BatchNormInt16Test)
 ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest)
 
 // Resize Bilinear - NCHW
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
 
 // Resize Bilinear - NHWC
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
 
 // Fake Quantization
 ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
-- 
cgit v1.2.1