about summary refs log tree commit diff
path: root/src/backends/test/LayerTests.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/test/LayerTests.cpp')
-rw-r--r--src/backends/test/LayerTests.cpp215
1 file changed, 139 insertions(+), 76 deletions(-)
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index 066d0c28f4..a7fb6a824e 100644
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -2907,22 +2907,12 @@ LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadF
return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}
-LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::TensorShape& inputOutputTensorShape,
+ armnn::DataLayout dataLayout)
{
- constexpr unsigned int inputWidth = 4;
- constexpr unsigned int inputHeight = 4;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
-
- constexpr unsigned int outputWidth = inputWidth;
- constexpr unsigned int outputHeight = inputHeight;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
-
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+ const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
1.0f, 2.0f, 3.0f, 4.0f,
@@ -2938,6 +2928,7 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloa
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::ResizeBilinearQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -2955,26 +2946,33 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloa
return result;
}
-LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
{
- constexpr unsigned int inputWidth = 2;
- constexpr unsigned int inputHeight = 2;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
+ // BatchSize = 1, Channels = 1, Height = 4, Width = 4
+ const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
- constexpr unsigned int outputWidth = inputWidth / 2;
- constexpr unsigned int outputHeight = inputHeight / 2;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
+ return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
+}
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // BatchSize = 1, Height = 4, Width = 4, Channels = 1
+ const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
+
+ return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::TensorShape& inputTensorShape,
+ const armnn::TensorShape& outputTensorShape,
+ armnn::DataLayout dataLayout)
+{
+ const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f, 255.0f,
- 200.0f, 250.f,
+ 1.0f, 255.0f,
+ 200.0f, 250.0f
}));
// The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
@@ -2991,6 +2989,7 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& work
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::ResizeBilinearQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3008,22 +3007,35 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& work
return result;
}
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
{
- constexpr unsigned int inputWidth = 4;
- constexpr unsigned int inputHeight = 4;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
+ // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
+ const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
- constexpr unsigned int outputWidth = inputWidth / 2;
- constexpr unsigned int outputHeight = inputHeight / 2;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
+ // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
+ const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+ return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
+ const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
+
+ // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
+ const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+
+ return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::TensorShape& inputTensorShape,
+ const armnn::TensorShape& outputTensorShape,
+ armnn::DataLayout dataLayout)
+{
+ const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
1.0f, 2.0f, 3.0f, 4.0f,
@@ -3034,14 +3046,15 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workl
LayerTestResult<float, 4> result(outputTensorInfo);
result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
- 1.f, 3.f,
- 3.f, 5.f
+ 1.0f, 3.0f,
+ 3.0f, 5.0f
}));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::ResizeBilinearQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3059,22 +3072,35 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workl
return result;
}
-LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
{
- constexpr unsigned int inputWidth = 5;
- constexpr unsigned int inputHeight = 3;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
+ // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
+ const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
- constexpr unsigned int outputWidth = 3;
- constexpr unsigned int outputHeight = 2;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
+ // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
+ const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+ return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
+ const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
+
+ // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
+ const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
+
+ return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::TensorShape& inputTensorShape,
+ const armnn::TensorShape& outputTensorShape,
+ armnn::DataLayout dataLayout)
+{
+ const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
@@ -3084,14 +3110,15 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloa
LayerTestResult<float, 4> result(outputTensorInfo);
result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
- 1.0f, 2.6666f, 6.0f,
- 78.5f, 179.3333f, 401.f
+ 1.0f, 2.6666f, 6.0f,
+ 78.5f, 179.3333f, 401.0f
}));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::ResizeBilinearQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3109,22 +3136,35 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloa
return result;
}
-LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
{
- constexpr unsigned int inputWidth = 2;
- constexpr unsigned int inputHeight = 3;
- constexpr unsigned int inputChannels = 1;
- constexpr unsigned int inputBatchSize = 1;
+ // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
+ const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
- constexpr unsigned int outputWidth = 5;
- constexpr unsigned int outputHeight = 3;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
+ // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
+ const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+ return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
+ const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
+
+ // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
+ const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
+
+ return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::TensorShape& inputTensorShape,
+ const armnn::TensorShape& outputTensorShape,
+ armnn::DataLayout dataLayout)
+{
+ const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
1.0f, 2.0f,
@@ -3134,15 +3174,16 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloa
LayerTestResult<float, 4> result(outputTensorInfo);
result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
- 1.0f, 1.4f, 1.8f, 2.f, 2.f,
- 13.f, 16.2f, 19.4f, 21.f, 21.f,
- 144.f, 179.6f, 215.2f, 233.f, 233.f
+ 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
+ 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
+ 144.0f, 179.6f, 215.2f, 233.0f, 233.0f
}));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::ResizeBilinearQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3160,6 +3201,28 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloa
return result;
}
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
+ const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
+
+ // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
+ const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
+
+ return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
+ const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
+
+ // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
+ const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
+
+ return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
constexpr unsigned int width = 2;