author    Matthew Bentham <Matthew.Bentham@arm.com>  2018-12-10 10:48:52 +0000
committer Matthew Bentham <matthew.bentham@arm.com>  2018-12-10 14:07:56 +0000
commit    955258dde076a40373ff9044b5f84c095a0aacef
tree      30baf5da7216bf964a01a81a14f70f84861817b2
parent    679efdae8319ec15058725ed41a23a5463644e07
MLCE-79 NEON QASYMM8 Addition Support
Unit tests not yet added, as they need Compute Library a84faffd.

Change-Id: Ica16df493e8d6a76da9d1f74bf43b8403f9dff62
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp | 2
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp | 2
-rw-r--r--  src/backends/neon/backend.mk | 2
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp | 4
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt | 4
-rw-r--r--  src/backends/neon/workloads/NeonAdditionWorkload.cpp (renamed from src/backends/neon/workloads/NeonAdditionFloatWorkload.cpp) | 14
-rw-r--r--  src/backends/neon/workloads/NeonAdditionWorkload.hpp (renamed from src/backends/neon/workloads/NeonAdditionFloatWorkload.hpp) | 4
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp | 2
8 files changed, 17 insertions(+), 17 deletions(-)
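Background note (not part of the commit): with this change the NEON addition workload is no longer float-only, so its validate helper can be used to probe QASYMM8 support. The sketch below assumes the Arm NN API of this period (DataType::QuantisedAsymm8, the backend-internal include path from this diff); shapes and quantisation parameters are illustrative only.

// Sketch only: probe NEON QASYMM8 addition support via the validate helper
// renamed in this change. Quantisation scale/offset values are arbitrary
// examples, not taken from the commit.
#include <armnn/Tensor.hpp>
#include <arm_compute/core/Error.h>
#include "workloads/NeonAdditionWorkload.hpp" // backend-internal header

bool IsQAsymm8AdditionSupported()
{
    using namespace armnn;

    // Two quantised 8-bit inputs and a quantised output of matching shape.
    TensorInfo input0({1, 2, 2, 3}, DataType::QuantisedAsymm8, 0.1f, 0);
    TensorInfo input1({1, 2, 2, 3}, DataType::QuantisedAsymm8, 0.1f, 0);
    TensorInfo output({1, 2, 2, 3}, DataType::QuantisedAsymm8, 0.1f, 0);

    // NeonAdditionWorkloadValidate forwards to Compute Library's
    // NEArithmeticAddition::validate and reports whether this configuration
    // is supported by the NEON backend.
    arm_compute::Status status = NeonAdditionWorkloadValidate(input0, input1, output);
    return status.error_code() == arm_compute::ErrorCode::OK;
}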
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index fd279e5d55..36c9f8bc08 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -17,7 +17,7 @@
#include <boost/core/ignore_unused.hpp>
#ifdef ARMCOMPUTENEON_ENABLED
-#include "workloads/NeonAdditionFloatWorkload.hpp"
+#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationFloatWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 0dfd52ebab..aed2d56646 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -161,7 +161,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NeonAdditionFloatWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonAdditionWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication(
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 2f74ecd1ce..88fb643c64 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -14,7 +14,7 @@ BACKEND_SOURCES := \
NeonTimer.cpp \
NeonWorkloadFactory.cpp \
workloads/NeonActivationWorkload.cpp \
- workloads/NeonAdditionFloatWorkload.cpp \
+ workloads/NeonAdditionWorkload.cpp \
workloads/NeonBatchNormalizationFloatWorkload.cpp \
workloads/NeonConstantWorkload.cpp \
workloads/NeonConvertFp16ToFp32Workload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index dc6ec16e49..adb64cba9b 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -107,7 +107,7 @@ static void NeonCreateElementwiseWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
- NeonCreateElementwiseWorkloadTest<NeonAdditionFloatWorkload,
+ NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
AdditionQueueDescriptor,
AdditionLayer,
DataType::Float16>();
@@ -116,7 +116,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
- NeonCreateElementwiseWorkloadTest<NeonAdditionFloatWorkload,
+ NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
AdditionQueueDescriptor,
AdditionLayer,
DataType::Float32>();
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index e383b04f25..2c2c9f05e6 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -6,8 +6,8 @@
list(APPEND armnnNeonBackendWorkloads_sources
NeonActivationWorkload.cpp
NeonActivationWorkload.hpp
- NeonAdditionFloatWorkload.cpp
- NeonAdditionFloatWorkload.hpp
+ NeonAdditionWorkload.cpp
+ NeonAdditionWorkload.hpp
NeonBatchNormalizationFloatWorkload.cpp
NeonBatchNormalizationFloatWorkload.hpp
NeonConstantWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonAdditionFloatWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index 45de4ddc1a..70a3909091 100644
--- a/src/backends/neon/workloads/NeonAdditionFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "NeonAdditionFloatWorkload.hpp"
+#include "NeonAdditionWorkload.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
@@ -25,11 +25,11 @@ arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo& input0,
}
-NeonAdditionFloatWorkload::NeonAdditionFloatWorkload(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<AdditionQueueDescriptor>(descriptor, info)
+NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<AdditionQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonAdditionFloatWorkload", 2, 1);
+ m_Data.ValidateInputsOutputs("NeonAdditionWorkload", 2, 1);
arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
@@ -38,9 +38,9 @@ NeonAdditionFloatWorkload::NeonAdditionFloatWorkload(const AdditionQueueDescript
m_AddLayer.configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
}
-void NeonAdditionFloatWorkload::Execute() const
+void NeonAdditionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonAdditionFloatWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonAdditionWorkload_Execute");
m_AddLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonAdditionFloatWorkload.hpp b/src/backends/neon/workloads/NeonAdditionWorkload.hpp
index 1eab6663a6..ca8ae8d7b7 100644
--- a/src/backends/neon/workloads/NeonAdditionFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.hpp
@@ -14,10 +14,10 @@ arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output);
-class NeonAdditionFloatWorkload : public FloatWorkload<AdditionQueueDescriptor>
+class NeonAdditionWorkload : public BaseWorkload<AdditionQueueDescriptor>
{
public:
- NeonAdditionFloatWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonAdditionWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
private:
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index e55cf0d332..48bd137560 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -5,7 +5,7 @@
#pragma once
#include "NeonActivationWorkload.hpp"
-#include "NeonAdditionFloatWorkload.hpp"
+#include "NeonAdditionWorkload.hpp"
#include "NeonBatchNormalizationFloatWorkload.hpp"
#include "NeonConstantWorkload.hpp"
#include "NeonConvertFp16ToFp32Workload.hpp"