author     Mike Kelly <mike.kelly@arm.com>   2020-01-27 12:14:47 +0000
committer  mike.kelly <mike.kelly@arm.com>   2020-01-27 14:49:07 +0000
commit     568580290c746b81f9808b133be1aa9aeb503856 (patch)
tree       8f6e0cb6e87473b26b68f4f4954056e40803fbe0
parent     a023c40a7644f0b32f8f593b9a3614c92b5f933d (diff)
download   armnn-568580290c746b81f9808b133be1aa9aeb503856.tar.gz
IVGCVSW-4380 Add NEON backend support for BatchToSpaceNd
* Added NeonBatchToSpaceNdWorkload class.
* Added CreateBatchToSpaceNd implementation to NeonWorkloadFactory.
* Added IsBatchToSpaceNdSupported implementation to NeonLayerSupport.
* Enabled BatchToSpaceNd tests on Neon backend.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I179feefaa67dc87a03fcbe52e5df100c1188f9a5
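For context, the sketch below shows how the IsBatchToSpaceNdSupported entry point added by this patch can be queried directly. It is not part of the change: the include path assumes an in-tree build with access to the internal src/backends/neon headers, and the 4x1x1x1 -> 1x2x2x1 NHWC shapes are illustrative only.

// Minimal sketch, assuming an in-tree build; shapes and values are illustrative.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <string>

#include "NeonLayerSupport.hpp" // internal header: src/backends/neon/NeonLayerSupport.hpp

int main()
{
    using namespace armnn;

    // NHWC: 4 batches of 1x1x1 gather into a single 2x2x1 image for block shape [2, 2].
    TensorInfo input(TensorShape({4, 1, 1, 1}), DataType::Float32);
    TensorInfo output(TensorShape({1, 2, 2, 1}), DataType::Float32);

    BatchToSpaceNdDescriptor descriptor;
    descriptor.m_BlockShape = {2, 2};
    descriptor.m_Crops      = {{0, 0}, {0, 0}};
    descriptor.m_DataLayout = DataLayout::NHWC;

    // The query forwards to NeonBatchToSpaceNdWorkloadValidate, which in turn
    // calls arm_compute::NEBatchToSpaceLayer::validate.
    NeonLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsBatchToSpaceNdSupported(input, output, descriptor,
                                                            Optional<std::string&>(reason));
    return supported ? 0 : 1;
}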
-rw-r--r--   src/backends/neon/NeonLayerSupport.cpp                      13
-rw-r--r--   src/backends/neon/NeonLayerSupport.hpp                       5
-rw-r--r--   src/backends/neon/NeonWorkloadFactory.cpp                    2
-rw-r--r--   src/backends/neon/backend.mk                                 1
-rw-r--r--   src/backends/neon/test/NeonLayerTests.cpp                   17
-rw-r--r--   src/backends/neon/workloads/CMakeLists.txt                   2
-rw-r--r--   src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp  67
-rw-r--r--   src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp  35
-rw-r--r--   src/backends/neon/workloads/NeonWorkloads.hpp                1
9 files changed, 142 insertions, 1 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index b8725be005..7d6e6d8d99 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -24,6 +24,7 @@
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonArgMinMaxWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
+#include "workloads/NeonBatchToSpaceNdWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthToSpaceWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
@@ -193,6 +194,18 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
descriptor);
}
+bool NeonLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 56a70c4886..d4f005155d 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -41,6 +41,11 @@ public:
const BatchNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index cb2e88eddb..38e471f1d3 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -133,7 +133,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonBatchToSpaceNdWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 740cbcd1ff..1c572e61f5 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -26,6 +26,7 @@ BACKEND_SOURCES := \
workloads/NeonAdditionWorkload.cpp \
workloads/NeonArgMinMaxWorkload.cpp \
workloads/NeonBatchNormalizationWorkload.cpp \
+ workloads/NeonBatchToSpaceNdWorkload.cpp \
workloads/NeonConcatWorkload.cpp \
workloads/NeonConstantWorkload.cpp \
workloads/NeonConvertFp16ToFp32Workload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 1b25cad993..1a9d7a9c50 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -27,6 +27,23 @@ using FactoryType = NeonWorkloadFactory;
// ============================================================================
// UNIT tests
+// BatchToSpace
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat321, BatchToSpaceNdNhwcTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat322, BatchToSpaceNdNhwcTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat323, BatchToSpaceNdNhwcTest3<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
+
// Convolution
ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 46b5332235..02ffedcf09 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -14,6 +14,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonArgMinMaxWorkload.hpp
NeonBatchNormalizationWorkload.cpp
NeonBatchNormalizationWorkload.hpp
+ NeonBatchToSpaceNdWorkload.cpp
+ NeonBatchToSpaceNdWorkload.hpp
NeonConcatWorkload.cpp
NeonConcatWorkload.hpp
NeonConstantWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
new file mode 100644
index 0000000000..a6e7aa4415
--- /dev/null
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
@@ -0,0 +1,67 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonBatchToSpaceNdWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+#include <ResolveType.hpp>
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& desc)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, desc.m_DataLayout);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, desc.m_DataLayout);
+
+ // ArmNN blockShape is [H, W]; ACL expects the block width first, then the height
+ int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_BlockShape[0]);
+ int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_BlockShape[1]);
+
+ const arm_compute::Status aclStatus = arm_compute::NEBatchToSpaceLayer::validate(&aclInputInfo,
+ blockWidth,
+ blockHeight,
+ &aclOutputInfo);
+ return aclStatus;
+}
+
+NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDescriptor& desc,
+ const WorkloadInfo& info)
+ : BaseWorkload<BatchToSpaceNdQueueDescriptor>(desc, info)
+{
+ m_Data.ValidateInputsOutputs("NeonBatchToSpaceNdWorkload", 1, 1);
+
+ arm_compute::ITensor& input =
+ boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output =
+ boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ // ArmNN blockShape is [H, W]; ACL expects the block width first, then the height
+ int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
+
+ m_Layer.reset(new arm_compute::NEBatchToSpaceLayer());
+ m_Layer->configure(&input, blockWidth, blockHeight, &output);
+ m_Layer->prepare();
+}
+
+void NeonBatchToSpaceNdWorkload::Execute() const
+{
+ if (m_Layer)
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSpaceToBatchNdWorkload_Execute");
+ m_Layer->run();
+ }
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp
new file mode 100644
index 0000000000..dd146e5681
--- /dev/null
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Descriptors.hpp>
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor);
+
+class NeonBatchToSpaceNdWorkload : public BaseWorkload<BatchToSpaceNdQueueDescriptor>
+{
+public:
+ using BaseWorkload<BatchToSpaceNdQueueDescriptor>::BaseWorkload;
+
+ NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ virtual void Execute() const override;
+
+private:
+ mutable std::unique_ptr<arm_compute::NEBatchToSpaceLayer> m_Layer;
+};
+
+}
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 39cf044b6c..b08483c443 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -10,6 +10,7 @@
#include "NeonDivisionWorkload.hpp"
#include "NeonArgMinMaxWorkload.hpp"
#include "NeonBatchNormalizationWorkload.hpp"
+#include "NeonBatchToSpaceNdWorkload.hpp"
#include "NeonConstantWorkload.hpp"
#include "NeonConvertFp16ToFp32Workload.hpp"
#include "NeonConvertFp32ToFp16Workload.hpp"