about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Ellen Norris-Thompson <ellen.norris-thompson@arm.com>   2019-07-15 14:23:30 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>     2019-07-17 16:36:30 +0000
commit    37e68686a4889b6e79b035356202903647671f13 (patch)
tree      f248e6ba83a3dcecc0c0bef829123a4378c23e99
parent    bcca1f4a7ba8364f7b5e58e8e8866ccd7d530f92 (diff)
download  armnn-37e68686a4889b6e79b035356202903647671f13.tar.gz
IVGCVSW-3297 Add Neon backend support for ResizeNearestNeighbour
* Renamed NeonResizeBilinearWorkload.* to NeonResizeWorkload.* and added
  support for ResizeNearestNeighbour
* Added CreateWorkload and LayerTests for Neon backend

Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: I72f5340608a0928f8b32a41d1915ee2c35849f18
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                      | 29
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                   | 25
-rw-r--r--  src/backends/neon/backend.mk                                |  2
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp          | 52
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                   | 66
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt                  |  4
-rw-r--r--  src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp  | 59
-rw-r--r--  src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp  | 29
-rw-r--r--  src/backends/neon/workloads/NeonResizeWorkload.cpp          | 72
-rw-r--r--  src/backends/neon/workloads/NeonResizeWorkload.hpp          | 29
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp               |  2
11 files changed, 248 insertions(+), 121 deletions(-)
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index ea875f6926..cb709c315a 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -39,7 +39,7 @@
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonPreluWorkload.hpp"
#include "workloads/NeonQuantizeWorkload.hpp"
-#include "workloads/NeonResizeBilinearWorkload.hpp"
+#include "workloads/NeonResizeWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSpaceToDepthWorkload.hpp"
#include "workloads/NeonSplitterWorkload.hpp"
@@ -511,25 +511,26 @@ bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if (descriptor.m_Method == ResizeMethod::Bilinear)
- {
- FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
- reasonIfUnsupported,
- input,
- output);
- }
-
- return false;
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
}
bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
- reasonIfUnsupported,
- input,
- output);
+ ResizeDescriptor descriptor;
+ descriptor.m_Method = ResizeMethod::Bilinear;
+ descriptor.m_DataLayout = DataLayout::NCHW;
+
+ const TensorShape& outputShape = output.GetShape();
+ descriptor.m_TargetHeight = outputShape[2];
+ descriptor.m_TargetWidth = outputShape[3];
+
+ return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}
bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index eadd636dd1..c50eaece1a 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -227,27 +227,22 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCo
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
- {
- ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
- resizeBilinearDescriptor.m_Inputs = descriptor.m_Inputs;
- resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
-
- resizeBilinearDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
- resizeBilinearDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
- resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
- return std::make_unique<NeonResizeBilinearWorkload>(resizeBilinearDescriptor, info);
- }
-
- return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonResizeWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<NeonResizeBilinearWorkload>(descriptor, info);
+ ResizeQueueDescriptor resizeDescriptor;
+ resizeDescriptor.m_Inputs = descriptor.m_Inputs;
+ resizeDescriptor.m_Outputs = descriptor.m_Outputs;
+
+ resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
+ resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
+ resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+ return CreateResize(resizeDescriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 9b0c188ea1..305abfcb49 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -45,7 +45,7 @@ BACKEND_SOURCES := \
workloads/NeonPreluWorkload.cpp \
workloads/NeonQuantizeWorkload.cpp \
workloads/NeonReshapeWorkload.cpp \
- workloads/NeonResizeBilinearWorkload.cpp \
+ workloads/NeonResizeWorkload.cpp \
workloads/NeonSoftmaxBaseWorkload.cpp \
workloads/NeonSoftmaxFloatWorkload.cpp \
workloads/NeonSoftmaxUint8Workload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 49c5a72a90..ac7eb253c2 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -19,6 +19,12 @@ BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
namespace
{
+boost::test_tools::predicate_result CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
+{
+ return CompareTensorHandleShape<IAclTensorHandle>(tensorHandle, expectedDimensions);
+}
+
bool TestNeonTensorHandleInfo(armnn::IAclTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
{
using namespace armnn::armcomputetensorutils;
@@ -493,6 +499,52 @@ BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
}
+template <typename ResizeWorkloadType, armnn::DataType DataType>
+static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
+{
+ Graph graph;
+ NeonWorkloadFactory factory =
+ NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
+ auto workload = CreateResizeBilinearWorkloadTest<ResizeWorkloadType, DataType>(factory, graph, dataLayout);
+
+ auto queueDescriptor = workload->GetData();
+
+ auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ switch (dataLayout)
+ {
+ case DataLayout::NHWC:
+ BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
+ BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+ break;
+ case DataLayout::NCHW:
+ default:
+ BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
+ BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+ }
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+}
+
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
static void NeonCreateSoftmaxWorkloadTest()
{
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 586e994db5..aeebd4fd9e 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -616,6 +616,72 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
armnn::DataLayout::NHWC)
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
// Quantize
ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 0ad961aa86..7bde80859e 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -56,8 +56,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonQuantizeWorkload.hpp
NeonReshapeWorkload.cpp
NeonReshapeWorkload.hpp
- NeonResizeBilinearWorkload.cpp
- NeonResizeBilinearWorkload.hpp
+ NeonResizeWorkload.cpp
+ NeonResizeWorkload.hpp
NeonSoftmaxBaseWorkload.cpp
NeonSoftmaxBaseWorkload.hpp
NeonSoftmaxFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
deleted file mode 100644
index 6943d872e4..0000000000
--- a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonResizeBilinearWorkload.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <neon/NeonTensorHandle.hpp>
-#include <neon/NeonLayerSupport.hpp>
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
-{
- const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- return arm_compute::NEScale::validate(&aclInputInfo,
- &aclOutputInfo,
- arm_compute::InterpolationPolicy::BILINEAR,
- arm_compute::BorderMode::REPLICATE,
- arm_compute::PixelValue(0.f),
- arm_compute::SamplingPolicy::TOP_LEFT);
-}
-
-NeonResizeBilinearWorkload::NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : BaseWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("NeonResizeBilinearWorkload", 1, 1);
-
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
- input.info()->set_data_layout(aclDataLayout);
- output.info()->set_data_layout(aclDataLayout);
-
- m_ResizeBilinearLayer.configure(&input,
- &output,
- arm_compute::InterpolationPolicy::BILINEAR,
- arm_compute::BorderMode::REPLICATE,
- arm_compute::PixelValue(0.f),
- arm_compute::SamplingPolicy::TOP_LEFT);
-};
-
-void NeonResizeBilinearWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeBilinearWorkload_Execute");
- m_ResizeBilinearLayer.run();
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp
deleted file mode 100644
index 21753b3127..0000000000
--- a/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <neon/workloads/NeonWorkloadUtils.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NEScale.h>
-
-namespace armnn
-{
-
-arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
-
-class NeonResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
- NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::NEScale m_ResizeBilinearLayer;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp
new file mode 100644
index 0000000000..a4e4a4a511
--- /dev/null
+++ b/src/backends/neon/workloads/NeonResizeWorkload.cpp
@@ -0,0 +1,72 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonResizeWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor)
+{
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(descriptor.m_DataLayout);
+ aclInputInfo.set_data_layout(aclDataLayout);
+ aclOutputInfo.set_data_layout(aclDataLayout);
+
+ arm_compute::InterpolationPolicy aclInterpolationPolicy =
+ ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Method);
+
+ return arm_compute::NEScale::validate(&aclInputInfo,
+ &aclOutputInfo,
+ aclInterpolationPolicy,
+ arm_compute::BorderMode::REPLICATE,
+ arm_compute::PixelValue(0.f),
+ arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+NeonResizeWorkload::NeonResizeWorkload(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<ResizeQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonResizeWorkload", 1, 1);
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ arm_compute::InterpolationPolicy aclInterpolationPolicy =
+ ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Parameters.m_Method);
+
+ m_ResizeLayer.configure(&input,
+ &output,
+ aclInterpolationPolicy,
+ arm_compute::BorderMode::REPLICATE,
+ arm_compute::PixelValue(0.f),
+ arm_compute::SamplingPolicy::TOP_LEFT);
+};
+
+void NeonResizeWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeWorkload_Execute");
+ m_ResizeLayer.run();
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.hpp b/src/backends/neon/workloads/NeonResizeWorkload.hpp
new file mode 100644
index 0000000000..69995c6311
--- /dev/null
+++ b/src/backends/neon/workloads/NeonResizeWorkload.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEScale.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor);
+
+class NeonResizeWorkload : public BaseWorkload<ResizeQueueDescriptor>
+{
+public:
+ NeonResizeWorkload(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info);
+ void Execute() const override;
+
+private:
+ mutable arm_compute::NEScale m_ResizeLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index a9604a160c..c5f4a54b77 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -30,7 +30,7 @@
#include "NeonPreluWorkload.hpp"
#include "NeonQuantizeWorkload.hpp"
#include "NeonReshapeWorkload.hpp"
-#include "NeonResizeBilinearWorkload.hpp"
+#include "NeonResizeWorkload.hpp"
#include "NeonSoftmaxFloatWorkload.hpp"
#include "NeonSoftmaxUint8Workload.hpp"
#include "NeonSpaceToDepthWorkload.hpp"