Diffstat (limited to 'src/backends/backendsCommon/test/LayerTests.cpp')
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp | 623
1 file changed, 0 insertions(+), 623 deletions(-)
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index fb07f9fe0b..af426a470b 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5224,342 +5224,6 @@ LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
}
-LayerTestResult<float, 4> ResizeBilinearNopTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
- const armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
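-    // The input and output tensor infos describe identical 1x2x4x4 shapes, so this
-    // resize is a no-op: the expected output is the input data, unchanged.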
- std::vector<float> inputData({
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f,
-
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f
- });
-
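-    // The permutation vector maps each source dimension to its destination index:
-    // { 0, 3, 1, 2 } sends N->0, C->3, H->1, W->2, rearranging NCHW data into NHWC
-    // to match the tensor infos created above for that layout.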
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
- }
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = input;
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<float, 4> SimpleResizeBilinearTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
-
- const armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
-
- std::vector<float> inputData({
- 1.0f, 255.0f,
- 200.0f, 250.0f,
-
- 250.0f, 200.0f,
- 250.0f, 1.0f
- });
-
- // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
- // then figures out the interpolants and weights. Note this is different to projecting the centre of the
-    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
- // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
- // which we would expect if projecting the centre).
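-    //
-    // Worked example: the scale factor is inputSize / outputSize = 2, so output texel
-    // (0,0) projects onto input (0,0) with all interpolation weight on that sample,
-    // yielding 1.0f for the first channel and 250.0f for the second.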
-
- std::vector<float> outputData({
- 1.0f,
-
- 250.0f
- });
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
- const armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
-
- std::vector<float> inputData({
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f,
-
- 7.0f, 6.0f, 5.0f, 4.0f,
- 6.0f, 5.0f, 4.0f, 3.0f,
- 5.0f, 4.0f, 3.0f, 2.0f,
- 4.0f, 3.0f, 2.0f, 1.0f
- });
-
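-    // With a scale factor of 2 in each dimension and top-left corner alignment, every
-    // output texel projects exactly onto input texel (2y, 2x), so no interpolation
-    // occurs: channel 0 samples inputs (0,0)=1, (0,2)=3, (2,0)=3 and (2,2)=5.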
- std::vector<float> outputData({
- 1.0f, 3.0f,
- 3.0f, 5.0f,
-
- 7.0f, 5.0f,
- 5.0f, 3.0f
- });
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearMinTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
-
- const armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
-
- std::vector<float> inputData({
- 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
- 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
- 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
-
- 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
- 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
- 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
- });
-
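-    // Scale factors are 3/2 = 1.5 vertically and 5/3 horizontally, so most output
-    // texels fall between input samples and are bilinearly interpolated. For example,
-    // output (0,1) projects to x = 5/3, giving 2.0f + 0.6666f * (3.0f - 2.0f) ~ 2.6666f,
-    // and output (1,0) projects to y = 1.5, giving 13.0f + 0.5f * (144.0f - 13.0f) = 78.5f.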
- std::vector<float> outputData({
- 1.0f, 2.6666f, 6.00f,
- 78.5f, 179.3333f, 401.00f,
-
- 987.0f, 454.6670f, 203.33f,
- 48.5f, 22.3333f, 10.00f
- });
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearMagTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
-
- const armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
-
- std::vector<float> inputData({
- 1.0f, 2.0f,
- 13.0f, 21.0f,
- 144.0f, 233.0f,
-
- 233.0f, 144.0f,
- 21.0f, 13.0f,
- 2.0f, 1.0f
- });
-
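-    // Magnification: the horizontal scale is 2/5 = 0.4, so the output columns project
-    // to input x = { 0.0, 0.4, 0.8, 1.2, 1.6 }. For row 0, x = 0.4 interpolates to
-    // 1.0f + 0.4f * (2.0f - 1.0f) = 1.4f, while positions beyond the last input
-    // column (1.2 and 1.6) clamp to it and repeat 2.0f.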
- std::vector<float> outputData({
- 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
- 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
- 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
-
- 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
- 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
- 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
- });
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
LayerTestResult<float, 2> FakeQuantizationTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -7634,293 +7298,6 @@ LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
shape0, output, 1.0f, 0);
}
-LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- constexpr unsigned int inputWidth = 4;
- constexpr unsigned int inputHeight = 4;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
-
- constexpr unsigned int outputWidth = inputWidth;
- constexpr unsigned int outputHeight = inputHeight;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
-
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::QuantisedAsymm8);
- inputTensorInfo.SetQuantizationScale(1.5f);
- inputTensorInfo.SetQuantizationOffset(-3);
-
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(1.5f);
- outputTensorInfo.SetQuantizationOffset(-3);
-
- auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
- 1, 2, 3, 4,
- 2, 3, 4, 5,
- 3, 4, 5, 6,
- 4, 5, 6, 7
- }));
-
- LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = input;
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- constexpr unsigned int inputWidth = 2;
- constexpr unsigned int inputHeight = 2;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
-
- constexpr unsigned int outputWidth = inputWidth / 2;
- constexpr unsigned int outputHeight = inputHeight / 2;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
-
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::QuantisedAsymm8);
- inputTensorInfo.SetQuantizationScale(0.1567f);
- inputTensorInfo.SetQuantizationOffset(1);
-
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(0.1567f);
- outputTensorInfo.SetQuantizationOffset(1);
-
- auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
- 1, 255,
- 200, 250
- }));
-
- // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
- // then figures out the interpolants and weights. Note this is different to projecting the centre of the
- // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
- // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
- // the centre).
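-    // Since the input and output share the same quantisation parameters (scale 0.1567f,
-    // offset 1), the selected top-left value passes through unchanged as quantised 1.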
- LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
- 1
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- constexpr unsigned int inputWidth = 4;
- constexpr unsigned int inputHeight = 4;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
-
- constexpr unsigned int outputWidth = inputWidth / 2;
- constexpr unsigned int outputHeight = inputHeight / 2;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
-
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::QuantisedAsymm8);
- inputTensorInfo.SetQuantizationScale(3.141592f);
- inputTensorInfo.SetQuantizationOffset(3);
-
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(3.141592f);
- outputTensorInfo.SetQuantizationOffset(3);
-
- auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
- 1, 2, 3, 4,
- 2, 3, 4, 5,
- 3, 4, 5, 6,
- 4, 5, 6, 7
- }));
-
- LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
- 1, 3,
- 3, 5
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- constexpr unsigned int inputWidth = 3;
- constexpr unsigned int inputHeight = 2;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
-
- constexpr unsigned int outputWidth = 2;
- constexpr unsigned int outputHeight = 1;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
-
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::QuantisedAsymm8);
- inputTensorInfo.SetQuantizationScale(1.5f);
- inputTensorInfo.SetQuantizationOffset(-1);
-
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(1.5f);
- outputTensorInfo.SetQuantizationOffset(-1);
-
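-    // The per-element comments below show dequantised values, following
-    // real = scale * (quantised - offset), e.g. 1.5f * (1 - (-1)) = 3.0f.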
- auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
- 1, 2, 3, // 3.0, 4.5, 6.0
- 5, 8, 13 // 9.0, 13.5, 21.0
- }));
-
- LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
- 1, 3 // 3.0, 5.25
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- constexpr unsigned int inputWidth = 2;
- constexpr unsigned int inputHeight = 3;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
-
- constexpr unsigned int outputWidth = 5;
- constexpr unsigned int outputHeight = 3;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
-
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::QuantisedAsymm8);
- inputTensorInfo.SetQuantizationScale(0.010765f);
- inputTensorInfo.SetQuantizationOffset(7);
-
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(0.010132f);
- outputTensorInfo.SetQuantizationOffset(-18);
-
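-    // Input and output quantisation differ here, so each element is dequantised with
-    // real = 0.010765f * (q - 7), interpolated, then requantised with
-    // q = real / 0.010132f + (-18); e.g. input 24 -> 0.183005f -> output 0.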
- auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
- 24, 228, // 0.183005, 2.379065,
- 105, 128, // 1.05497, 1.302565
- 230, 71 // 2.400595, 0.68896
- }));
-
- LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
- 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
- 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
- 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeBilinearQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
LayerTestResult<float, 4> BatchNormTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)