author    Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>  2018-10-12 12:02:18 +0100
committer Matthew Bentham <matthew.bentham@arm.com>              2018-10-22 16:57:53 +0100
commit    233b3d685b4e4e931e86e021b77ee81d5b818f38 (patch)
tree      5c538b52f0cecbb49a8b1248da60fa1580a16d08
parent    f9aeef0e036df176699aa96d30d2ca8d7546534e (diff)
download  armnn-233b3d685b4e4e931e86e021b77ee81d5b818f38.tar.gz
IVGCVSW-1951 Remove type templating from NeonConstantWorkload
Change-Id: Ib831f02ab6b5d96f1a959187d8f3e694e6257ae5
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                  |  2
-rw-r--r--  src/backends/neon/backend.mk                               |  3
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt                 |  7
-rw-r--r--  src/backends/neon/workloads/NeonBaseConstantWorkload.hpp   | 82
-rw-r--r--  src/backends/neon/workloads/NeonConstantFloatWorkload.cpp  | 17
-rw-r--r--  src/backends/neon/workloads/NeonConstantFloatWorkload.hpp  | 20
-rw-r--r--  src/backends/neon/workloads/NeonConstantUint8Workload.cpp  | 17
-rw-r--r--  src/backends/neon/workloads/NeonConstantUint8Workload.hpp  | 20
-rw-r--r--  src/backends/neon/workloads/NeonConstantWorkload.cpp       | 75
-rw-r--r--  src/backends/neon/workloads/NeonConstantWorkload.hpp       | 24
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp              |  4
11 files changed, 104 insertions(+), 167 deletions(-)
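
For context, this commit collapses the templated NeonBaseConstantWorkload (and its Float/Uint8 subclasses) into a single NeonConstantWorkload that selects the copy routine from the tensor's runtime data type. A minimal, self-contained sketch of that design change, using simplified hypothetical types rather than the real Arm NN classes:

    // Hypothetical illustration of the refactoring in this commit:
    // per-type workload templates are replaced by one class that dispatches
    // on the data type at runtime, as the new NeonConstantWorkload does.
    #include <iostream>

    enum class DataType { F16, F32, QAsymmU8 };

    // Old approach: a base class template parameterised on the supported
    // types, with one concrete subclass per data type (Float, Uint8).
    template <DataType... SupportedTypes>
    class TypedConstantWorkload
    {
    public:
        void Execute() const { std::cout << "copy data for the templated types\n"; }
    };

    // New approach: a single non-templated class; the data type is examined
    // when the workload executes.
    class ConstantWorkload
    {
    public:
        explicit ConstantWorkload(DataType type) : m_Type(type) {}

        void Execute() const
        {
            switch (m_Type)
            {
                case DataType::F16:      std::cout << "copy Half constants\n";    break;
                case DataType::F32:      std::cout << "copy float constants\n";   break;
                case DataType::QAsymmU8: std::cout << "copy uint8_t constants\n"; break;
            }
        }

    private:
        DataType m_Type;
    };

    int main()
    {
        ConstantWorkload workload(DataType::F32);
        workload.Execute(); // prints "copy float constants"
        return 0;
    }

The factory-side change in the diff below mirrors this: CreateConstant now returns std::make_unique<NeonConstantWorkload>(descriptor, info) instead of choosing between NeonConstantFloatWorkload and NeonConstantUint8Workload via MakeWorkload.
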
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 1e8ab1afa8..a43722728d 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -218,7 +218,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2No
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NeonConstantFloatWorkload, NeonConstantUint8Workload>(descriptor, info);
+ return std::make_unique<NeonConstantWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index af83fb1321..e63baa0541 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -16,8 +16,7 @@ BACKEND_SOURCES := \
workloads/NeonActivationWorkload.cpp \
workloads/NeonAdditionFloatWorkload.cpp \
workloads/NeonBatchNormalizationFloatWorkload.cpp \
- workloads/NeonConstantFloatWorkload.cpp \
- workloads/NeonConstantUint8Workload.cpp \
+ workloads/NeonConstantWorkload.cpp \
workloads/NeonConvertFp16ToFp32Workload.cpp \
workloads/NeonConvertFp32ToFp16Workload.cpp \
workloads/NeonConvolution2dBaseWorkload.cpp \
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index a96c27c75a..0b0b9ed5a0 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -8,13 +8,10 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonActivationWorkload.hpp
NeonAdditionFloatWorkload.cpp
NeonAdditionFloatWorkload.hpp
- NeonBaseConstantWorkload.hpp
NeonBatchNormalizationFloatWorkload.cpp
NeonBatchNormalizationFloatWorkload.hpp
- NeonConstantFloatWorkload.cpp
- NeonConstantFloatWorkload.hpp
- NeonConstantUint8Workload.cpp
- NeonConstantUint8Workload.hpp
+ NeonConstantWorkload.cpp
+ NeonConstantWorkload.hpp
NeonConvertFp16ToFp32Workload.cpp
NeonConvertFp16ToFp32Workload.hpp
NeonConvertFp32ToFp16Workload.cpp
diff --git a/src/backends/neon/workloads/NeonBaseConstantWorkload.hpp b/src/backends/neon/workloads/NeonBaseConstantWorkload.hpp
deleted file mode 100644
index 828e476d29..0000000000
--- a/src/backends/neon/workloads/NeonBaseConstantWorkload.hpp
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <arm_compute/core/Types.h>
-#include <armnnUtils/Half.hpp>
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-#include <backends/neon/NeonTensorHandle.hpp>
-#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
-#include <backends/CpuTensorHandle.hpp>
-#include <backends/Workload.hpp>
-
-#include <boost/cast.hpp>
-
-namespace armnn
-{
-
-// Base class template providing an implementation of the Constant layer common to all data types.
-template <armnn::DataType... DataFormats>
-class NeonBaseConstantWorkload : public TypedWorkload<ConstantQueueDescriptor, DataFormats...>
-{
-public:
- NeonBaseConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
- : TypedWorkload<ConstantQueueDescriptor, DataFormats...>(descriptor, info)
- , m_RanOnce(false)
- {
- }
-
- virtual void Execute() const override
- {
- using namespace armcomputetensorutils;
-
- // The intermediate tensor held by the corresponding layer output handler can be initialised with the
- // given data on the first inference, then reused for subsequent inferences.
- // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer
- // may not have been configured at the time.
- if (!m_RanOnce)
- {
- const ConstantQueueDescriptor& data = this->m_Data;
-
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
- arm_compute::ITensor& output =
- boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
- arm_compute::DataType computeDataType =
- boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
-
- switch (computeDataType)
- {
- case arm_compute::DataType::F16:
- {
- CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
- break;
- }
- case arm_compute::DataType::F32:
- {
- CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
- break;
- }
- case arm_compute::DataType::QASYMM8:
- {
- CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
- break;
- }
- default:
- {
- BOOST_ASSERT_MSG(false, "Unknown data type");
- break;
- }
- }
-
- m_RanOnce = true;
- }
- }
-
-private:
- mutable bool m_RanOnce;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantFloatWorkload.cpp b/src/backends/neon/workloads/NeonConstantFloatWorkload.cpp
deleted file mode 100644
index dbdd057101..0000000000
--- a/src/backends/neon/workloads/NeonConstantFloatWorkload.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConstantFloatWorkload.hpp"
-
-namespace armnn
-{
-
-void NeonConstantFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantFloatWorkload_Execute");
- NeonBaseConstantWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantFloatWorkload.hpp b/src/backends/neon/workloads/NeonConstantFloatWorkload.hpp
deleted file mode 100644
index c35b5fda3e..0000000000
--- a/src/backends/neon/workloads/NeonConstantFloatWorkload.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "NeonBaseConstantWorkload.hpp"
-
-namespace armnn
-{
-
-class NeonConstantFloatWorkload : public NeonBaseConstantWorkload<DataType::Float16, DataType::Float32>
-{
-public:
- using NeonBaseConstantWorkload<DataType::Float16, DataType::Float32>::NeonBaseConstantWorkload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantUint8Workload.cpp b/src/backends/neon/workloads/NeonConstantUint8Workload.cpp
deleted file mode 100644
index c607d86844..0000000000
--- a/src/backends/neon/workloads/NeonConstantUint8Workload.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConstantUint8Workload.hpp"
-
-namespace armnn
-{
-
-void NeonConstantUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantUint8Workload_Execute");
- NeonBaseConstantWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantUint8Workload.hpp b/src/backends/neon/workloads/NeonConstantUint8Workload.hpp
deleted file mode 100644
index 2cb9516afe..0000000000
--- a/src/backends/neon/workloads/NeonConstantUint8Workload.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "NeonBaseConstantWorkload.hpp"
-
-namespace armnn
-{
-
-class NeonConstantUint8Workload : public NeonBaseConstantWorkload<DataType::QuantisedAsymm8>
-{
-public:
- using NeonBaseConstantWorkload<DataType::QuantisedAsymm8>::NeonBaseConstantWorkload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
new file mode 100644
index 0000000000..a3485471c8
--- /dev/null
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -0,0 +1,75 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonConstantWorkload.hpp"
+
+#include <arm_compute/core/Types.h>
+#include <armnnUtils/Half.hpp>
+#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
+#include <backends/neon/NeonTensorHandle.hpp>
+#include <backends/CpuTensorHandle.hpp>
+#include <backends/Workload.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
+ , m_RanOnce(false)
+{
+}
+
+void NeonConstantWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantWorkload_Execute");
+
+ using namespace armcomputetensorutils;
+
+ // The intermediate tensor held by the corresponding layer output handler can be initialised with the
+ // given data on the first inference, then reused for subsequent inferences.
+ // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer
+ // may not have been configured at the time.
+ if (!m_RanOnce)
+ {
+ const ConstantQueueDescriptor& data = this->m_Data;
+
+ BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ arm_compute::ITensor& output =
+ boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
+ arm_compute::DataType computeDataType =
+ boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
+
+ switch (computeDataType)
+ {
+ case arm_compute::DataType::F16:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
+ break;
+ }
+ case arm_compute::DataType::F32:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
+ break;
+ }
+ case arm_compute::DataType::QASYMM8:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
+ break;
+ }
+ default:
+ {
+ BOOST_ASSERT_MSG(false, "Unknown data type");
+ break;
+ }
+ }
+
+ m_RanOnce = true;
+ }
+}
+
+} //namespace armnn
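
As the comment in the new Execute() above notes, the constant data is copied into the output tensor only on the first inference and then reused, guarded by a mutable m_RanOnce flag, because the next layer's ACL kernel may not be configured at construction time. A standalone sketch of that lazy, run-once initialisation pattern, with hypothetical names and no Arm NN dependencies:

    // Hypothetical sketch of the run-once initialisation used by
    // NeonConstantWorkload: the payload is written on the first Execute()
    // call only, then reused on subsequent inferences.
    #include <iostream>
    #include <vector>

    class LazyConstantOutput
    {
    public:
        explicit LazyConstantOutput(std::vector<float> constants)
            : m_Constants(std::move(constants))
            , m_RanOnce(false)
        {
        }

        // Execute() is const, so the guard flag is mutable, as in the workload.
        void Execute() const
        {
            if (!m_RanOnce)
            {
                m_Output = m_Constants;   // one-off copy into the output buffer
                m_RanOnce = true;
            }
            std::cout << "output holds " << m_Output.size() << " values\n";
        }

    private:
        std::vector<float> m_Constants;
        mutable std::vector<float> m_Output;
        mutable bool m_RanOnce;
    };

    int main()
    {
        LazyConstantOutput workload({1.0f, 2.0f, 3.0f});
        workload.Execute(); // copies on the first call
        workload.Execute(); // reuses the already-initialised output
        return 0;
    }
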
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.hpp b/src/backends/neon/workloads/NeonConstantWorkload.hpp
new file mode 100644
index 0000000000..72069633f8
--- /dev/null
+++ b/src/backends/neon/workloads/NeonConstantWorkload.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
+
+namespace armnn
+{
+
+class NeonConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
+{
+public:
+ NeonConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ virtual void Execute() const override;
+
+private:
+ mutable bool m_RanOnce;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 93711b6fc1..702ddb5c82 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -6,10 +6,8 @@
#pragma once
#include "NeonActivationWorkload.hpp"
#include "NeonAdditionFloatWorkload.hpp"
-#include "NeonBaseConstantWorkload.hpp"
#include "NeonBatchNormalizationFloatWorkload.hpp"
-#include "NeonConstantFloatWorkload.hpp"
-#include "NeonConstantUint8Workload.hpp"
+#include "NeonConstantWorkload.hpp"
#include "NeonConvertFp16ToFp32Workload.hpp"
#include "NeonConvertFp32ToFp16Workload.hpp"
#include "NeonConvolution2dBaseWorkload.hpp"