author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-10-28 16:06:50 +0000
committer Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-10-29 09:52:55 +0000
commit    b3670b106bfef93aa2ab800785ff1e982e5bbdb3 (patch)
tree      18438d14e5885d419bf48f62abfd17d6f13ff366
parent    102cdbd76edcd033a2f1c35f4d6853f881bcc1b4 (diff)
download  armnn-b3670b106bfef93aa2ab800785ff1e982e5bbdb3.tar.gz
Refactor Resize layer tests
* Move implementations to newly created source file ResizeTestImpl.cpp,
  leave only the declarations in the header
* Reduce code duplication by extracting common code into a generic
  implementation template

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ib8aa395e5fb4470b26cbe943c177f6b64c860a93
-rw-r--r--  src/backends/backendsCommon/common.mk                             1
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt                   1
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp    792
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp    1008
4 files changed, 827 insertions, 975 deletions
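
The heart of the change is the new generic implementation in ResizeTestImpl.cpp below: each test case now just fills in a ResizeTestParams struct and forwards it to a single ResizeTestImpl template. A minimal sketch of how each test in the new file now reads (the shapes and values here are illustrative only; workloadFactory and memoryManager come from the test fixture):

    ResizeTestParams params;
    params.m_ResizeMethod = armnn::ResizeMethod::Bilinear;
    params.m_DataLayout   = armnn::DataLayout::NHWC;
    params.m_InputShape   = { 1, 1, 2, 2 };   // shapes are given in NCHW order;
    params.m_OutputShape  = { 1, 1, 1, 1 };   // the impl permutes them for NHWC
    params.m_InputData          = { 1.0f, 2.0f, 3.0f, 4.0f };
    params.m_ExpectedOutputData = { 1.0f };
    params.SetInOutQuantParams(0.5f, 2);      // only used for quantized types

    // One generic implementation handles quantization, layout permutation
    // and workload execution for all data types and resize methods:
    LayerTestResult<float, 4> result =
        ResizeTestImpl<4, armnn::DataType::Float32>(workloadFactory, memoryManager, params);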
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 5024f1efbb..de0da68149 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -67,6 +67,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/PadTestImpl.cpp \
test/layerTests/Pooling2dTestImpl.cpp \
test/layerTests/ReshapeTestImpl.cpp \
+ test/layerTests/ResizeTestImpl.cpp \
test/layerTests/RsqrtTestImpl.cpp \
test/layerTests/SliceTestImpl.cpp \
test/layerTests/QuantizeTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index f310ef7564..9c86cdf3c1 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -116,6 +116,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/QuantizeTestImpl.hpp
layerTests/ReshapeTestImpl.cpp
layerTests/ReshapeTestImpl.hpp
+ layerTests/ResizeTestImpl.cpp
layerTests/ResizeTestImpl.hpp
layerTests/RsqrtTestImpl.cpp
layerTests/RsqrtTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
new file mode 100644
index 0000000000..895afe06af
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -0,0 +1,792 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ResizeTestImpl.hpp"
+
+#include <DataLayoutIndexed.hpp>
+#include <Permute.hpp>
+#include <QuantizeHelper.hpp>
+#include <TensorUtils.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+struct ResizeTestParams
+{
+ ResizeTestParams()
+ : m_ResizeMethod(armnn::ResizeMethod::Bilinear)
+ , m_DataLayout(armnn::DataLayout::NCHW)
+ , m_InQuantScale(1.0f)
+ , m_InQuantOffset(0)
+ , m_OutQuantScale(1.0f)
+ , m_OutQuantOffset(0) {}
+
+ armnn::ResizeMethod m_ResizeMethod;
+ armnn::DataLayout m_DataLayout;
+
+ armnn::TensorShape m_InputShape;
+ armnn::TensorShape m_OutputShape;
+
+ std::vector<float> m_InputData;
+ std::vector<float> m_ExpectedOutputData;
+
+ float m_InQuantScale;
+ int32_t m_InQuantOffset;
+
+ float m_OutQuantScale;
+ int32_t m_OutQuantOffset;
+
+ void SetInQuantParams(float quantScale, int32_t quantOffset)
+ {
+ m_InQuantScale = quantScale;
+ m_InQuantOffset = quantOffset;
+ }
+
+ void SetOutQuantParams(float quantScale, int32_t quantOffset)
+ {
+ m_OutQuantScale = quantScale;
+ m_OutQuantOffset = quantOffset;
+ }
+
+ void SetInOutQuantParams(float quantScale, int32_t quantOffset)
+ {
+ SetInQuantParams(quantScale, quantOffset);
+ SetOutQuantParams(quantScale, quantOffset);
+ }
+};
+
+template<size_t NumDims,
+ armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> ResizeTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const ResizeTestParams& params)
+{
+ armnn::TensorInfo inputInfo(params.m_InputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(params.m_OutputShape, ArmnnType);
+
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputInfo.SetQuantizationScale(params.m_InQuantScale);
+ inputInfo.SetQuantizationOffset(params.m_InQuantOffset);
+
+ outputInfo.SetQuantizationScale(params.m_OutQuantScale);
+ outputInfo.SetQuantizationOffset(params.m_OutQuantOffset);
+ }
+
+ std::vector<T> inputData =
+ armnnUtils::QuantizedVector<T>(params.m_InputData, params.m_InQuantScale, params.m_InQuantOffset);
+
+ std::vector<T> expectedOutputData =
+ armnnUtils::QuantizedVector<T>(params.m_ExpectedOutputData,
+ params.m_OutQuantScale,
+ params.m_OutQuantOffset);
+
+ if (params.m_DataLayout == armnn::DataLayout::NHWC)
+ {
+ PermuteTensorNchwToNhwc(inputInfo, inputData);
+ PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
+ }
+
+ auto input = MakeTensor<T, NumDims>(inputInfo, inputData);
+
+ LayerTestResult<T, NumDims> result(outputInfo);
+ result.outputExpected = MakeTensor<T, NumDims>(outputInfo, expectedOutputData);
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
+
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Method = params.m_ResizeMethod;
+ descriptor.m_Parameters.m_DataLayout = params.m_DataLayout;
+
+ armnnUtils::DataLayoutIndexed dataLayoutIndexed(params.m_DataLayout);
+ descriptor.m_Parameters.m_TargetWidth = params.m_OutputShape[dataLayoutIndexed.GetWidthIndex()];
+ descriptor.m_Parameters.m_TargetHeight = params.m_OutputShape[dataLayoutIndexed.GetHeightIndex()];
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), input.origin());
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+ return result;
+}
+
+} // anonymous namespace
+
+//
+// Bilinear
+//
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearNopTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::Bilinear;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 4, 4 };
+ testParams.m_OutputShape = testParams.m_InputShape;
+
+ testParams.m_InputData =
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
+
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f
+ };
+
+ testParams.m_ExpectedOutputData = testParams.m_InputData;
+
+ testParams.SetInOutQuantParams(1.5f, 3);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleResizeBilinearTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::Bilinear;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 2, 2 };
+ testParams.m_OutputShape = { 1, 2, 1, 1 };
+
+ testParams.m_InputData =
+ {
+ 1.0f, 255.0f,
+ 200.0f, 250.0f,
+
+ 250.0f, 200.0f,
+ 250.0f, 1.0f
+ };
+
+    // The 'resize' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+    // which we would expect if projecting the centre).
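+    // For example: the scale factor here is inputSize / outputSize = 2, so output
+    // texel (0,0) projects exactly onto input texel (0,0) and no blending occurs.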
+ testParams.m_ExpectedOutputData =
+ {
+ 1.0f,
+
+ 250.0f
+ };
+
+ testParams.SetInOutQuantParams(0.1567f, 1);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearSqMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::Bilinear;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 4, 4 };
+ testParams.m_OutputShape = { 1, 2, 2, 2 };
+
+ testParams.m_InputData =
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
+
+ 7.0f, 6.0f, 5.0f, 4.0f,
+ 6.0f, 5.0f, 4.0f, 3.0f,
+ 5.0f, 4.0f, 3.0f, 2.0f,
+ 4.0f, 3.0f, 2.0f, 1.0f
+ };
+
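+    // Halving both spatial dimensions samples the input at even coordinates (0, 2),
+    // so each output texel equals the top-left texel of its 2x2 input block.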
+ testParams.m_ExpectedOutputData =
+ {
+ 1.0f, 3.0f,
+ 3.0f, 5.0f,
+
+ 7.0f, 5.0f,
+ 5.0f, 3.0f
+ };
+
+ testParams.SetInOutQuantParams(3.141592f, 3);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::Bilinear;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 3, 5 };
+ testParams.m_OutputShape = { 1, 2, 2, 3 };
+
+ testParams.m_InputData =
+ {
+ 1.5f, 3.0f, 4.5f, 6.0f, 7.5f,
+ 9.0f, 10.5f, 12.0f, 13.5f, 15.0f,
+ 16.5f, 18.0f, 19.5f, 21.0f, 22.5f,
+
+ 16.5f, 18.0f, 19.5f, 21.0f, 22.5f,
+ 9.0f, 10.5f, 12.0f, 13.5f, 15.0f,
+ 1.5f, 3.0f, 4.5f, 6.0f, 7.5f
+ };
+
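+    // Scale factors are 1.5 (height) and 5/3 (width); e.g. output (0,1) samples
+    // input x = 5/3, giving 3.0 + (2/3) * (4.5 - 3.0) = 4.0.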
+ testParams.m_ExpectedOutputData =
+ {
+ 1.50f, 4.00f, 6.50f,
+ 12.75f, 15.25f, 17.75f,
+
+ 16.50f, 19.00f, 21.50f,
+ 5.25f, 7.75f, 10.25f
+ };
+
+ testParams.SetInOutQuantParams(1.5f, -1);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearMagTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::Bilinear;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 3, 2 };
+ testParams.m_OutputShape = { 1, 2, 3, 5 };
+
+ testParams.m_InputData =
+ {
+ 1.0f, 2.0f,
+ 13.0f, 21.0f,
+ 144.0f, 233.0f,
+
+ 233.0f, 144.0f,
+ 21.0f, 13.0f,
+ 2.0f, 1.0f
+ };
+
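+    // Upscaling the width from 2 to 5 gives a scale of 0.4; the first row samples
+    // input x = 0.0, 0.4, 0.8, 1.2, 1.6, producing 1.0, 1.4, 1.8, 2.0, 2.0 (the
+    // second interpolant is clamped at the right border).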
+ testParams.m_ExpectedOutputData =
+ {
+ 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
+ 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
+ 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
+
+ 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
+ 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
+ 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
+ };
+
+ testParams.SetInQuantParams(1.0f, 0);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+//
+// NearestNeighbor
+//
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::NearestNeighbor;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 4, 4 };
+ testParams.m_OutputShape = testParams.m_InputShape;
+
+ testParams.m_InputData =
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
+
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f
+ };
+
+ testParams.m_ExpectedOutputData = testParams.m_InputData;
+
+ testParams.SetInOutQuantParams(1.5f, 3);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::NearestNeighbor;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 2, 2 };
+ testParams.m_OutputShape = { 1, 2, 1, 1 };
+
+ testParams.m_InputData =
+ {
+ 1.0f, 255.0f,
+ 200.0f, 250.0f,
+
+ 250.0f, 200.0f,
+ 250.0f, 1.0f
+ };
+
+    // The 'resize' operation projects the top-left corner of output texels into the input image;
+    // NearestNeighbor then picks the input texel at the (floored) projected coordinate rather than
+    // computing interpolants and weights. Thus, for an input matrix of 2x2, we'll expect the output
+    // 1x1 matrix to contain, as its single element, the value that was at position (0,0) of the
+    // input matrix (rather than an average, which we would expect if projecting the centre).
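+    // For example: the scale factor here is 2, so output texel (0,0) projects to
+    // input coordinate (0,0) and the result is simply input[0][0].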
+ testParams.m_ExpectedOutputData =
+ {
+ 1.0f,
+
+ 250.0f
+ };
+
+ testParams.SetInOutQuantParams(0.1567f, 1);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::NearestNeighbor;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 4, 4 };
+ testParams.m_OutputShape = { 1, 2, 2, 2 };
+
+ testParams.m_InputData =
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
+
+ 7.0f, 6.0f, 5.0f, 4.0f,
+ 6.0f, 5.0f, 4.0f, 3.0f,
+ 5.0f, 4.0f, 3.0f, 2.0f,
+ 4.0f, 3.0f, 2.0f, 1.0f
+ };
+
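+    // With a scale factor of 2 the projected coordinates land exactly on input
+    // texels (0, 2), so NearestNeighbor produces the same result as Bilinear here.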
+ testParams.m_ExpectedOutputData =
+ {
+ 1.0f, 3.0f,
+ 3.0f, 5.0f,
+
+ 7.0f, 5.0f,
+ 5.0f, 3.0f
+ };
+
+ testParams.SetInOutQuantParams(3.141592f, 3);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::NearestNeighbor;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 3, 5 };
+ testParams.m_OutputShape = { 1, 2, 2, 3 };
+
+ testParams.m_InputData =
+ {
+ 1.5f, 3.0f, 4.5f, 6.0f, 7.5f,
+ 9.0f, 10.5f, 12.0f, 13.5f, 15.0f,
+ 16.5f, 18.0f, 19.5f, 21.0f, 22.5f,
+
+ 16.5f, 18.0f, 19.5f, 21.0f, 22.5f,
+ 9.0f, 10.5f, 12.0f, 13.5f, 15.0f,
+ 1.5f, 3.0f, 4.5f, 6.0f, 7.5f
+ };
+
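+    // The projected coordinate is floored: e.g. output (0,2) projects to
+    // input x = 2 * 5/3 = 10/3, flooring to 3 and picking 6.0.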
+ testParams.m_ExpectedOutputData =
+ {
+ 1.5f, 3.0f, 6.0f,
+ 9.0f, 10.5f, 13.5f,
+
+ 16.5f, 18.0f, 21.0f,
+ 9.0f, 10.5f, 13.5f
+ };
+
+ testParams.SetInOutQuantParams(1.5f, -1);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset)
+{
+ ResizeTestParams testParams;
+ testParams.m_ResizeMethod = armnn::ResizeMethod::NearestNeighbor;
+ testParams.m_DataLayout = dataLayout;
+
+ testParams.m_InputShape = { 1, 2, 3, 2 };
+ testParams.m_OutputShape = { 1, 2, 3, 5 };
+
+ testParams.m_InputData =
+ {
+ 0.183005f, 2.379065f,
+ 1.054970f, 1.302565f,
+ 2.400595f, 0.688960f,
+
+ 2.400595f, 0.688960f,
+ 1.054970f, 1.302565f,
+ 0.183005f, 2.379065f,
+ };
+
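+    // With a width scale of 0.4, flooring the projected coordinates
+    // 0.0, 0.4, 0.8, 1.2, 1.6 repeats the first input column three times
+    // and the second twice.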
+ testParams.m_ExpectedOutputData =
+ {
+ 0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
+ 1.054970f, 1.054970f, 1.054970f, 1.302565f, 1.302565f,
+ 2.400595f, 2.400595f, 2.400595f, 0.688960f, 0.688960f,
+
+ 2.400595f, 2.400595f, 2.400595f, 0.688960f, 0.688960f,
+ 1.054970f, 1.054970f, 1.054970f, 1.302565f, 1.302565f,
+ 0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f
+ };
+
+ testParams.SetInQuantParams(inQuantScale, inQuantOffset);
+ testParams.SetOutQuantParams(outQuantScale, outQuantOffset);
+
+ return ResizeTestImpl<4, ArmnnType>(workloadFactory, memoryManager, testParams);
+}
+
+//
+// Explicit template instantiations
+//
+
+// Float32
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearNopTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleResizeBilinearTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearMinTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearMagTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset);
+
+// Float16
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeBilinearNopTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+SimpleResizeBilinearTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeBilinearMinTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeBilinearMagTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset);
+
+// QAsymm8
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset);
+
+// QSymm16
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeBilinearNopTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeBilinearMinTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeBilinearMagTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset);
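
To make the projection convention in the comments above concrete, here is a self-contained sketch of the sampling scheme the expected outputs follow: top-left-corner projection with scale = inputSize / outputSize, flooring for NearestNeighbor, border clamping for Bilinear. This is an illustration written for this review, not ArmNN's reference implementation:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Resize a single H x W channel; 'bilinear' selects between the two
    // resize methods exercised by the tests above.
    std::vector<float> ResizeChannel(const std::vector<float>& in,
                                     unsigned inH, unsigned inW,
                                     unsigned outH, unsigned outW,
                                     bool bilinear)
    {
        std::vector<float> out(outH * outW);
        const float scaleY = static_cast<float>(inH) / static_cast<float>(outH);
        const float scaleX = static_cast<float>(inW) / static_cast<float>(outW);

        for (unsigned y = 0; y < outH; ++y)
        {
            for (unsigned x = 0; x < outW; ++x)
            {
                // Project the top-left corner of the output texel into the input.
                const float fy = y * scaleY;
                const float fx = x * scaleX;
                const unsigned y0 = static_cast<unsigned>(fy); // floor (fy >= 0)
                const unsigned x0 = static_cast<unsigned>(fx);

                if (!bilinear)
                {
                    // NearestNeighbor: take the texel at the floored coordinate.
                    out[y * outW + x] = in[y0 * inW + x0];
                    continue;
                }

                // Bilinear: clamp the second sample to the border, then blend.
                const unsigned y1 = std::min(y0 + 1, inH - 1);
                const unsigned x1 = std::min(x0 + 1, inW - 1);
                const float wy = fy - static_cast<float>(y0);
                const float wx = fx - static_cast<float>(x0);
                const float top = in[y0 * inW + x0] * (1.0f - wx) + in[y0 * inW + x1] * wx;
                const float bot = in[y1 * inW + x0] * (1.0f - wx) + in[y1 * inW + x1] * wx;
                out[y * outW + x] = top * (1.0f - wy) + bot * wy;
            }
        }
        return out;
    }

    int main()
    {
        // First channel of ResizeBilinearMagTest above: 3x2 -> 3x5.
        const std::vector<float> input = { 1.0f, 2.0f, 13.0f, 21.0f, 144.0f, 233.0f };
        for (float v : ResizeChannel(input, 3, 2, 3, 5, /*bilinear=*/true))
        {
            std::printf("%.1f ", v); // 1.0 1.4 1.8 2.0 2.0 13.0 16.2 19.4 ...
        }
        std::printf("\n");
        return 0;
    }

Running this on the first channel of ResizeBilinearMagTest's input reproduces the expected row 1.0, 1.4, 1.8, 2.0, 2.0; passing bilinear=false reproduces the repeated-column pattern of ResizeNearestNeighborMagTest.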
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
index 56ce51a844..4fe4b73ada 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
@@ -7,1017 +7,75 @@
#include "LayerTestResult.hpp"
-#include <Permute.hpp>
-#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
-#include <TensorUtils.hpp>
-#include <armnn/ArmNN.hpp>
+#include <armnn/Types.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/test/TensorCopyUtils.hpp>
-#include <backendsCommon/test/WorkloadTestUtils.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-//
-// ResizeBilinear
-//
-
+// Bilinear
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearNopTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(1.5f);
- inputTensorInfo.SetQuantizationOffset(-3);
- outputTensorInfo.SetQuantizationScale(1.5f);
- outputTensorInfo.SetQuantizationOffset(-3);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 2, 3, 4,
- 2, 3, 4, 5,
- 3, 4, 5, 6,
- 4, 5, 6, 7
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f,
-
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = input;
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
-
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ const armnn::DataLayout dataLayout);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleResizeBilinearTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(0.1567f);
- inputTensorInfo.SetQuantizationOffset(1);
- outputTensorInfo.SetQuantizationScale(0.1567f);
- outputTensorInfo.SetQuantizationOffset(1);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 255,
- 200, 250
- }
- : std::initializer_list<float>
- {
- 1.0f, 255.0f,
- 200.0f, 250.0f,
-
- 250.0f, 200.0f,
- 250.0f, 1.0f
- };
-
- // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
- // then figures out the interpolants and weights. Note this is different to projecting the centre of the
- // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
- // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
- // which we would expect if projecting the centre).
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1
- }
- : std::initializer_list<float>
- {
- 1.0f,
-
- 250.0f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
-
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ const armnn::DataLayout dataLayout);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearSqMinTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(3.141592f);
- inputTensorInfo.SetQuantizationOffset(3);
- outputTensorInfo.SetQuantizationScale(3.141592f);
- outputTensorInfo.SetQuantizationOffset(3);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 2, 3, 4,
- 2, 3, 4, 5,
- 3, 4, 5, 6,
- 4, 5, 6, 7
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f,
-
- 7.0f, 6.0f, 5.0f, 4.0f,
- 6.0f, 5.0f, 4.0f, 3.0f,
- 5.0f, 4.0f, 3.0f, 2.0f,
- 4.0f, 3.0f, 2.0f, 1.0f
- };
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 3,
- 3, 5
- }
- : std::initializer_list<float>
- {
- 1.0f, 3.0f,
- 3.0f, 5.0f,
-
- 7.0f, 5.0f,
- 5.0f, 3.0f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
-
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearMinTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(1.5f);
- inputTensorInfo.SetQuantizationOffset(-1);
- outputTensorInfo.SetQuantizationScale(1.5f);
- outputTensorInfo.SetQuantizationOffset(-1);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 3.0f, 4.5f, 6.0f, // 1, 2, 3, : Expected quantised values
- 9.0f, 13.5f, 21.0f // 5, 8, 13
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
- 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
- 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
-
- 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
- 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
- 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
- };
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 3.0f, 5.25f // 1, 3
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.6666f, 6.00f,
- 78.5f, 179.3333f, 401.00f,
-
- 987.0f, 454.6670f, 203.33f,
- 48.5f, 22.3333f, 10.00f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
-
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearMagTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(0.010765f);
- inputTensorInfo.SetQuantizationOffset(7);
- outputTensorInfo.SetQuantizationScale(0.010132f);
- outputTensorInfo.SetQuantizationOffset(-18);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 0.183005f, 2.379065f, // 24, 228, : Expected quantised values
- 1.054970f, 1.302565f, // 105, 128,
- 2.400595f, 0.688960f // 230, 71
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f,
- 13.0f, 21.0f,
- 144.0f, 233.0f,
-
- 233.0f, 144.0f,
- 21.0f, 13.0f,
- 2.0f, 1.0f
- };
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 0.18300501f, 1.06142902f, 1.93985295f, 2.37906504f, 2.37906504f,
- 1.05497003f, 1.15400803f, 1.25304604f, 1.30256498f, 1.30256498f,
- 2.40059495f, 1.71594095f, 1.03128707f, 0.68896002f, 0.68896002f
- // 0, 87, 173, 217, 217, : Expected quantised values
- // 86, 96, 106, 111, 111,
- // 219, 151, 84, 50, 50
- }
- : std::initializer_list<float>
- {
- 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
- 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
- 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
-
- 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
- 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
- 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
-
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
-
-//
-// ResizeNearestNeighbor
-//
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+// NearestNeighbor
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(1.5f);
- inputTensorInfo.SetQuantizationOffset(-3);
- outputTensorInfo.SetQuantizationScale(1.5f);
- outputTensorInfo.SetQuantizationOffset(-3);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 2, 3, 4,
- 2, 3, 4, 5,
- 3, 4, 5, 6,
- 4, 5, 6, 7
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f,
-
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = input;
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(0.1567f);
- inputTensorInfo.SetQuantizationOffset(1);
- outputTensorInfo.SetQuantizationScale(0.1567f);
- outputTensorInfo.SetQuantizationOffset(1);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 255,
- 200, 250
- }
- : std::initializer_list<float>
- {
- 1.0f, 255.0f,
- 200.0f, 250.0f,
-
- 250.0f, 200.0f,
- 250.0f, 1.0f
- };
-
- // The 'resize' operation projects the top-left corner of output texels into the input image,
- // then figures out the interpolants and weights. Note this is different to projecting the centre of the
- // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
- // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
- // which we would expect if projecting the centre).
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1
- }
- : std::initializer_list<float>
- {
- 1.0f,
-
- 250.0f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(3.141592f);
- inputTensorInfo.SetQuantizationOffset(3);
- outputTensorInfo.SetQuantizationScale(3.141592f);
- outputTensorInfo.SetQuantizationOffset(3);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 2, 3, 4,
- 2, 3, 4, 5,
- 3, 4, 5, 6,
- 4, 5, 6, 7
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f,
-
- 7.0f, 6.0f, 5.0f, 4.0f,
- 6.0f, 5.0f, 4.0f, 3.0f,
- 5.0f, 4.0f, 3.0f, 2.0f,
- 4.0f, 3.0f, 2.0f, 1.0f
- };
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 1, 3,
- 3, 5
- }
- : std::initializer_list<float>
- {
- 1.0f, 3.0f,
- 3.0f, 5.0f,
-
- 7.0f, 5.0f,
- 5.0f, 3.0f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(1.5f);
- inputTensorInfo.SetQuantizationOffset(-1);
- outputTensorInfo.SetQuantizationScale(1.5f);
- outputTensorInfo.SetQuantizationOffset(-1);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 3.0f, 4.5f, 6.0f, // 1, 2, 3 : expected quantised values
- 9.0f, 13.5f, 21.0f // 5, 8, 13
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
- 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
- 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
-
- 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
- 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
- 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
- };
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 3.0f, 4.5f // 1, 2 : expected quantised values
- }
- : std::initializer_list<float>
- {
- 1.f, 2.f, 5.f,
- 13.f, 21.f, 55.f,
-
- 987.f, 610.f, 233.f,
- 89.f, 55.f, 21.f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout,
- float inQuantScale,
- int32_t inQuantOffset,
- float outQuantScale,
- int32_t outQuantOffset)
-{
- armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);
-
- armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
- ? armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
- : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo.SetQuantizationScale(inQuantScale);
- inputTensorInfo.SetQuantizationOffset(inQuantOffset);
- outputTensorInfo.SetQuantizationScale(outQuantScale);
- outputTensorInfo.SetQuantizationOffset(outQuantOffset);
- }
-
- std::vector<float> inputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 0.183005f, 2.379065f, // 24, 228 : expected quantised values
- 1.054970f, 1.302565f, // 105, 128,
- 2.400595f, 0.688960f // 230, 71
- }
- : std::initializer_list<float>
- {
- 1.0f, 2.0f,
- 13.0f, 21.0f,
- 144.0f, 233.0f,
-
- 233.0f, 144.0f,
- 21.0f, 13.0f,
- 2.0f, 1.0f
- };
-
- std::vector<float> outputData = armnn::IsQuantizedType<T>()
- ? std::initializer_list<float>
- {
- 0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
- 1.054970f, 1.054970f, 1.054970f, 1.302565f, 1.302565f,
- 2.400595f, 2.400595f, 2.400595f, 0.688960f, 0.688960f
- }
- : std::initializer_list<float>
- {
- 1.f, 1.f, 1.f, 2.f, 2.f,
- 13.f, 13.f, 13.f, 21.f, 21.f,
- 144.f, 144.f, 144.f, 233.f, 233.f,
-
- 233.f, 233.f, 233.f, 144.f, 144.f,
- 21.f, 21.f, 21.f, 13.f, 13.f,
- 2.f, 2.f, 2.f, 1.f, 1.f
- };
-
- const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
- if (dataLayout == armnn::DataLayout::NHWC)
- {
- std::vector<float> tmp(inputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
- inputData = tmp;
-
- std::vector<float> tmp1(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
- outputData = tmp1;
- }
-
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ResizeQueueDescriptor descriptor;
- descriptor.m_Parameters.m_DataLayout = dataLayout;
- descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
-}
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset);
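
Note: because the template definitions now live in ResizeTestImpl.cpp rather than the header, the .cpp must explicitly instantiate each test function for every data type the backends exercise, or the declarations above will fail at link time. A minimal sketch of that instantiation pattern, assuming the Float32 and QuantisedAsymm8 types used by the Arm NN tests of this era (the actual type list in ResizeTestImpl.cpp may differ):

    // Explicit instantiations at the end of ResizeTestImpl.cpp (sketch).
    // One such block is needed per test function and per instantiated type.
    template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
    ResizeNearestNeighborMagTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::DataLayout dataLayout,
        float inQuantScale,
        int32_t inQuantOffset,
        float outQuantScale,
        int32_t outQuantOffset);

    template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
    ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::DataLayout dataLayout,
        float inQuantScale,
        int32_t inQuantOffset,
        float outQuantScale,
        int32_t outQuantOffset);

Callers such as the per-backend LayerTests continue to include only the header; the symbols they link against come from these instantiations in the single shared .cpp.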