author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>   2019-07-02 17:25:47 +0100
committer  Áron Virginás-Tar <aron.virginas-tar@arm.com>   2019-07-12 14:44:27 +0000
commit     cc0cefb6ca2abf22fc0128548b93a492df530705 (patch)
tree       3b87a05e2b83ee432a04b7c43f769e0a572a2a5c
parent     59c6670cac0aac1fedf426fb19ccd578da6f9f55 (diff)
IVGCVSW-3296 Add CL backend support for ResizeNearestNeighbour
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I7f4c722141837939fd8904c52e75704a15c8a5e3
-rw-r--r--  src/backends/aclCommon/ArmComputeUtils.hpp                   |  13
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp              |  73
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                           |  27
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp                        |  26
-rw-r--r--  src/backends/cl/backend.mk                                   |   2
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp               |  33
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp                        | 140
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt                     |   4
-rw-r--r--  src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp  |  44
-rw-r--r--  src/backends/cl/workloads/ClResizeBilinearFloatWorkload.hpp  |  25
-rw-r--r--  src/backends/cl/workloads/ClResizeWorkload.cpp               |  74
-rw-r--r--  src/backends/cl/workloads/ClResizeWorkload.hpp               |  29
-rw-r--r--  src/backends/cl/workloads/ClWorkloads.hpp                    |   2
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                |   8

14 files changed, 323 insertions, 177 deletions
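
Note on usage (not part of the patch): with this change, ResizeMethod::NearestNeighbor becomes usable on the GpuAcc (CL) backend alongside Bilinear, for both float and quantised tensors. The following is a minimal sketch of how a caller could exercise the new path through the public Arm NN graph API; it assumes the INetwork/Optimize/IRuntime interfaces of the contemporary Arm NN releases, and the tensor shapes are illustrative only.

// Sketch only: a one-layer network that upscales a 1x1x3x2 NCHW tensor with
// nearest-neighbour interpolation and is optimized for the GpuAcc (CL) backend.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    // Describe a nearest-neighbour resize from 3x2 to 3x5 (NCHW).
    ResizeDescriptor resizeDesc;
    resizeDesc.m_Method       = ResizeMethod::NearestNeighbor; // newly supported on CL
    resizeDesc.m_DataLayout   = DataLayout::NCHW;
    resizeDesc.m_TargetHeight = 3;
    resizeDesc.m_TargetWidth  = 5;

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input  = network->AddInputLayer(0);
    IConnectableLayer* resize = network->AddResizeLayer(resizeDesc, "resize");
    IConnectableLayer* output = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(resize->GetInputSlot(0));
    resize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 3, 2 }, DataType::Float32));
    resize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 3, 5 }, DataType::Float32));

    // Ask the optimizer to place the graph on the CL backend.
    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::GpuAcc }, runtime->GetDeviceSpec());

    return optNet ? 0 : 1;
}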
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 5b8f983ecc..0f56160051 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -122,6 +122,19 @@ ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnected
return fc_info;
}
+inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
+{
+ switch (resizeMethod)
+ {
+ case ResizeMethod::Bilinear:
+ return arm_compute::InterpolationPolicy::BILINEAR;
+ case ResizeMethod::NearestNeighbor:
+ return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
+ default:
+ throw InvalidArgumentException("Unsupported resize method");
+ }
+}
+
inline unsigned int ComputeSoftmaxAclAxis(const armnn::TensorInfo& tensor)
{
unsigned int dim = tensor.GetNumDimensions();
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 7db8471db6..d6747f5898 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -950,7 +950,11 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout);
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Rsqrt2dTestCommon(
@@ -3391,7 +3395,11 @@ template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout dataLayout)
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset)
{
armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
@@ -3402,46 +3410,46 @@ LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
if (armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.010765f);
- inputTensorInfo.SetQuantizationOffset(7);
- outputTensorInfo.SetQuantizationScale(0.010132f);
- outputTensorInfo.SetQuantizationOffset(-18);
+ inputTensorInfo.SetQuantizationScale(inQuantScale);
+ inputTensorInfo.SetQuantizationOffset(inQuantOffset);
+ outputTensorInfo.SetQuantizationScale(outQuantScale);
+ outputTensorInfo.SetQuantizationOffset(outQuantOffset);
}
std::vector<float> inputData = armnn::IsQuantizedType<T>()
? std::initializer_list<float>
- {
- 0.183005f, 2.379065f, // 24, 228, : Expected quantised values
- 1.05497f, 1.302565f, // 105, 128,
- 2.400595f, 0.68896f // 230, 71
- }
+ {
+ 0.183005f, 2.379065f, // 24, 228, : expected quantised values
+ 1.054970f, 1.302565f, // 105, 128,
+ 2.400595f, 0.688960f // 230, 71
+ }
: std::initializer_list<float>
- {
- 1.0f, 2.0f,
- 13.0f, 21.0f,
- 144.0f, 233.0f,
+ {
+ 1.0f, 2.0f,
+ 13.0f, 21.0f,
+ 144.0f, 233.0f,
- 233.0f, 144.0f,
- 21.0f, 13.0f,
- 2.0f, 1.0f
- };
+ 233.0f, 144.0f,
+ 21.0f, 13.0f,
+ 2.0f, 1.0f
+ };
std::vector<float> outputData = armnn::IsQuantizedType<T>()
? std::initializer_list<float>
- {
- 0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
- 1.05497f, 1.05497f, 1.05497f, 1.302565f, 1.302565f,
- 2.400595f, 2.400595f, 2.400595f, 0.68896f, 0.68896f
- }
+ {
+ 0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
+ 1.054970f, 1.054970f, 1.054970f, 1.302565f, 1.302565f,
+ 2.400595f, 2.400595f, 2.400595f, 0.688960f, 0.688960f
+ }
: std::initializer_list<float>
- {
- 1.f, 1.f, 1.f, 2.f, 2.f,
- 13.f, 13.f, 13.f, 21.f, 21.f,
- 144.f, 144.f, 144.f, 233.f, 233.f,
+ {
+ 1.f, 1.f, 1.f, 2.f, 2.f,
+ 13.f, 13.f, 13.f, 21.f, 21.f,
+ 144.f, 144.f, 144.f, 233.f, 233.f,
- 233.f, 233.f, 233.f, 144.f, 144.f,
- 21.f, 21.f, 21.f, 13.f, 13.f,
- 2.f, 2.f, 2.f, 1.f, 1.f
- };
+ 233.f, 233.f, 233.f, 144.f, 144.f,
+ 21.f, 21.f, 21.f, 13.f, 13.f,
+ 2.f, 2.f, 2.f, 1.f, 1.f
+ };
const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
if (dataLayout == armnn::DataLayout::NHWC)
@@ -3488,7 +3496,6 @@ LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
return result;
}
-
template<armnn::DataType ArmnnType, typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 6d9b197679..7eb1dcf39a 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -40,6 +40,7 @@
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClPreluWorkload.hpp"
+#include "workloads/ClResizeWorkload.hpp"
#include "workloads/ClQuantizeWorkload.hpp"
#include "workloads/ClSoftmaxBaseWorkload.hpp"
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
@@ -570,28 +571,22 @@ bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
-
- if (descriptor.m_Method == ResizeMethod::Bilinear)
- {
- return IsSupportedForDataTypeCl(reasonIfUnsupported,
- input.GetDataType(),
- &TrueFunc<>,
- &FalseFuncU8<>);
- }
-
- return false;
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
- return IsSupportedForDataTypeCl(reasonIfUnsupported,
- input.GetDataType(),
- &TrueFunc<>,
- &FalseFuncU8<>);
+ ResizeDescriptor descriptor;
+ descriptor.m_Method = ResizeMethod::Bilinear;
+ descriptor.m_DataLayout = DataLayout::NCHW;
+
+ const TensorShape& outputShape = output.GetShape();
+ descriptor.m_TargetHeight = outputShape[2];
+ descriptor.m_TargetWidth = outputShape[3];
+
+ return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}
bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
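
Note (not part of the patch): after this change IsResizeSupported defers to ClResizeWorkloadValidate, so both interpolation methods and quantised data types are validated by the Compute Library rather than by the old data-type whitelist. A hedged sketch of querying the new support path follows; it assumes direct use of the internal ClLayerSupport class, as the CL unit tests do, and the helper name is hypothetical.

// Sketch only: asks the CL backend whether a quantised nearest-neighbour
// resize is supported, mirroring what the optimizer does during backend placement.
#include <cl/ClLayerSupport.hpp>   // internal backend header (assumed include path)
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <string>

bool IsNearestNeighbourResizeSupportedOnCl()
{
    using namespace armnn;

    ClLayerSupport layerSupport;

    TensorInfo inputInfo ({ 1, 1, 3, 2 }, DataType::QuantisedAsymm8, 0.1f, 50);
    TensorInfo outputInfo({ 1, 1, 3, 5 }, DataType::QuantisedAsymm8, 0.1f, 50);

    ResizeDescriptor descriptor;
    descriptor.m_Method       = ResizeMethod::NearestNeighbor;
    descriptor.m_DataLayout   = DataLayout::NCHW;
    descriptor.m_TargetHeight = 3;
    descriptor.m_TargetWidth  = 5;

    std::string reason;
    return layerSupport.IsResizeSupported(inputInfo, outputInfo, descriptor,
                                          Optional<std::string&>(reason));
}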
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 506acb4534..6ce87d872e 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -260,27 +260,23 @@ std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopy
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
- {
- ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
- resizeBilinearDescriptor.m_Inputs = descriptor.m_Inputs;
- resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
-
- resizeBilinearDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
- resizeBilinearDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
- resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
- return MakeWorkload<ClResizeBilinearFloatWorkload, NullWorkload>(resizeBilinearDescriptor, info);
- }
-
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClResizeWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResizeBilinear(
const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<ClResizeBilinearFloatWorkload, NullWorkload>(descriptor, info);
+ ResizeQueueDescriptor resizeDescriptor;
+ resizeDescriptor.m_Inputs = descriptor.m_Inputs;
+ resizeDescriptor.m_Outputs = descriptor.m_Outputs;
+
+ resizeDescriptor.m_Parameters.m_Method = ResizeMethod::Bilinear;
+ resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
+ resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+ resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
+
+ return CreateResize(resizeDescriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization(
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 1bc1fb3d71..57d7cb9f03 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -48,7 +48,7 @@ BACKEND_SOURCES := \
workloads/ClPreluWorkload.cpp \
workloads/ClQuantizeWorkload.cpp \
workloads/ClReshapeWorkload.cpp \
- workloads/ClResizeBilinearFloatWorkload.cpp \
+ workloads/ClResizeWorkload.cpp \
workloads/ClSoftmaxBaseWorkload.cpp \
workloads/ClSoftmaxFloatWorkload.cpp \
workloads/ClSoftmaxUint8Workload.cpp \
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index b89abdb473..de13390f87 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -758,16 +758,15 @@ BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
}
-template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
-static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
+template <typename ResizeWorkloadType, typename armnn::DataType DataType>
+static void ClResizeWorkloadTest(DataLayout dataLayout)
{
Graph graph;
ClWorkloadFactory factory =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
- auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
+ auto workload = CreateResizeBilinearWorkloadTest<ResizeWorkloadType, DataType>(factory, graph, dataLayout);
- // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
auto queueDescriptor = workload->GetData();
auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
@@ -786,24 +785,34 @@ static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
}
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
{
- ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeFloat16NchwWorkload)
{
- ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
{
- ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
{
- ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat16NhwcWorkload)
+{
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+{
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
}
template <typename MeanWorkloadType, typename armnn::DataType DataType>
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index c786244ba1..f2ff294e92 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -305,30 +305,6 @@ ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, armnn::DataLa
ARMNN_AUTO_TEST_CASE(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, armnn::DataLayout::NCHW)
-// Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-
-// Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
- ResizeBilinearNopTest<armnn::DataType::Float32>,
- armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
- SimpleResizeBilinearTest<armnn::DataType::Float32>,
- armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
- ResizeBilinearSqMinTest<armnn::DataType::Float32>,
- armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
- ResizeBilinearMinTest<armnn::DataType::Float32>,
- armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
- ResizeBilinearMagTest<armnn::DataType::Float32>,
- armnn::DataLayout::NHWC)
-
// Constant
ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
@@ -539,6 +515,122 @@ ARMNN_AUTO_TEST_CASE(StridedSlice3DReverseUint8, StridedSlice3DReverseUint8Test)
ARMNN_AUTO_TEST_CASE(StridedSlice2DUint8, StridedSlice2DUint8Test)
ARMNN_AUTO_TEST_CASE(StridedSlice2DReverseUint8, StridedSlice2DReverseUint8Test)
+// Resize Bilinear - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
+ SimpleResizeBilinearTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
+ SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
+ ResizeBilinearNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
+ ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
+ ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
+ ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
+ ResizeBilinearMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
+ ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+
+// Resize Bilinear - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+ ResizeBilinearNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
+ ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+ SimpleResizeBilinearTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
+ SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+ ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
+ ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+ ResizeBilinearMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
+ ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
// Quantize
ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index d98956fb06..2a3b1ad6b6 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -60,8 +60,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClQuantizeWorkload.hpp
ClReshapeWorkload.cpp
ClReshapeWorkload.hpp
- ClResizeBilinearFloatWorkload.cpp
- ClResizeBilinearFloatWorkload.hpp
+ ClResizeWorkload.cpp
+ ClResizeWorkload.hpp
ClSoftmaxBaseWorkload.cpp
ClSoftmaxBaseWorkload.hpp
ClSoftmaxFloatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
deleted file mode 100644
index ac7d60c23b..0000000000
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClResizeBilinearFloatWorkload.hpp"
-#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <cl/ClLayerSupport.hpp>
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include "ClWorkloadUtils.hpp"
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClResizeBilinearFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
- input.info()->set_data_layout(aclDataLayout);
- output.info()->set_data_layout(aclDataLayout);
-
- m_ResizeBilinearLayer.configure(&input, &output, arm_compute::InterpolationPolicy::BILINEAR,
- arm_compute::BorderMode::REPLICATE, arm_compute::PixelValue(0.f),
- arm_compute::SamplingPolicy::TOP_LEFT);
-};
-
-void ClResizeBilinearFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
- RunClFunction(m_ResizeBilinearLayer, CHECK_LOCATION());
-}
-
-} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.hpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.hpp
deleted file mode 100644
index 07ddceccd2..0000000000
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClResizeBilinearFloatWorkload : public FloatWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
- ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLScale m_ResizeBilinearLayer;
-};
-
-} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeWorkload.cpp b/src/backends/cl/workloads/ClResizeWorkload.cpp
new file mode 100644
index 0000000000..3c9c3aab16
--- /dev/null
+++ b/src/backends/cl/workloads/ClResizeWorkload.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClResizeWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <cl/ClLayerSupport.hpp>
+#include <cl/ClTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status ClResizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor)
+{
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(descriptor.m_DataLayout);
+ aclInputInfo.set_data_layout(aclDataLayout);
+ aclOutputInfo.set_data_layout(aclDataLayout);
+
+ arm_compute::InterpolationPolicy aclInterpolationPolicy =
+ ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Method);
+
+ return arm_compute::CLScale::validate(&aclInputInfo,
+ &aclOutputInfo,
+ aclInterpolationPolicy,
+ arm_compute::BorderMode::REPLICATE,
+ arm_compute::PixelValue(0.f),
+ arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+ClResizeWorkload::ClResizeWorkload(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) :
+ BaseWorkload<ResizeQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("ClResizeWorkload", 1, 1);
+
+ arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ arm_compute::InterpolationPolicy aclInterpolationPolicy =
+ ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Parameters.m_Method);
+
+ m_ResizeLayer.configure(&input,
+ &output,
+ aclInterpolationPolicy,
+ arm_compute::BorderMode::REPLICATE,
+ arm_compute::PixelValue(0.f),
+ arm_compute::SamplingPolicy::TOP_LEFT);
+};
+
+void ClResizeWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeWorkload_Execute");
+ RunClFunction(m_ResizeLayer, CHECK_LOCATION());
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeWorkload.hpp b/src/backends/cl/workloads/ClResizeWorkload.hpp
new file mode 100644
index 0000000000..5a128fafda
--- /dev/null
+++ b/src/backends/cl/workloads/ClResizeWorkload.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClResizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor);
+
+class ClResizeWorkload : public BaseWorkload<ResizeQueueDescriptor>
+{
+public:
+ ClResizeWorkload(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info);
+ void Execute() const override;
+
+private:
+ mutable arm_compute::CLScale m_ResizeLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 256b68c96a..a64dea27e7 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -30,7 +30,7 @@
#include "ClPreluWorkload.hpp"
#include "ClQuantizeWorkload.hpp"
#include "ClReshapeWorkload.hpp"
-#include "ClResizeBilinearFloatWorkload.hpp"
+#include "ClResizeWorkload.hpp"
#include "ClSoftmaxFloatWorkload.hpp"
#include "ClSoftmaxUint8Workload.hpp"
#include "ClSpaceToBatchNdWorkload.hpp"
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 447d95e2ee..57ac9463e1 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -680,10 +680,10 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
- armnn::DataLayout::NCHW)
+ armnn::DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
- armnn::DataLayout::NCHW)
+ armnn::DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>,
armnn::DataLayout::NCHW)
@@ -727,10 +727,10 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
armnn::DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
- armnn::DataLayout::NHWC)
+ armnn::DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
- armnn::DataLayout::NHWC)
+ armnn::DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>,
armnn::DataLayout::NHWC)