author    Matthew Bentham <matthew.bentham@arm.com>    2018-12-12 16:15:59 +0000
committer Matthew Bentham <matthew.bentham@arm.com>    2018-12-31 15:56:48 +0000
commit    c48ac8c8cea1748ebfef15144f070799d4a129c3 (patch)
tree      62eead8b1d684fa7edbd3e2a1a70e4ed871a1f30 /src/backends
parent    fbdad03c927aa5d30deec6fa1a61eef10f8c265f (diff)
download  armnn-c48ac8c8cea1748ebfef15144f070799d4a129c3.tar.gz
MLCE-80 Remove strong typing from NeonBatchNormalization
Technical debt work towards adding some new Neon workloads.

Change-Id: I08ab6dd14d0e89d4ebc8a878fb69caa5681012bf
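In short, the batch-normalization workload drops the data-type-restricted FloatWorkload base in favour of the untyped BaseWorkload, and the factory constructs it directly instead of going through MakeWorkloadHelper. The following is a condensed before/after sketch of the two touched declarations, summarised from the hunks below (not a complete listing of either file):

    // Before: the FloatWorkload base strongly types the workload, so the factory
    // went through MakeWorkloadHelper and fell back to NullWorkload for other
    // data types.
    class NeonBatchNormalizationFloatWorkload
        : public FloatWorkload<BatchNormalizationQueueDescriptor>
    {
    public:
        NeonBatchNormalizationFloatWorkload(const BatchNormalizationQueueDescriptor& descriptor,
                                            const WorkloadInfo& info);
        virtual void Execute() const override;
    };

    // After: BaseWorkload carries no data-type restriction, which is the
    // groundwork for backing further data types with the same workload class.
    class NeonBatchNormalizationWorkload
        : public BaseWorkload<BatchNormalizationQueueDescriptor>
    {
    public:
        NeonBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor& descriptor,
                                       const WorkloadInfo& info);
        virtual void Execute() const override;
    };

    // Factory: direct construction replaces the templated helper.
    std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
        const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
    {
        return std::make_unique<NeonBatchNormalizationWorkload>(descriptor, info);
    }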
Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                             2
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                          2
-rw-r--r--  src/backends/neon/backend.mk                                       2
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp                 8
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt                         4
-rw-r--r--  src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp (renamed from src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp)  14
-rw-r--r--  src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp (renamed from src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.hpp)   8
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp                      2
8 files changed, 20 insertions(+), 22 deletions(-)
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 724455de40..7efdf159c9 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -19,7 +19,7 @@
#ifdef ARMCOMPUTENEON_ENABLED
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
-#include "workloads/NeonBatchNormalizationFloatWorkload.hpp"
+#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 848a6f397b..85e5768571 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -185,7 +185,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NeonBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonBatchNormalizationWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index a3058ad73e..fdfd696fbe 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -15,7 +15,7 @@ BACKEND_SOURCES := \
NeonWorkloadFactory.cpp \
workloads/NeonActivationWorkload.cpp \
workloads/NeonAdditionWorkload.cpp \
- workloads/NeonBatchNormalizationFloatWorkload.cpp \
+ workloads/NeonBatchNormalizationWorkload.cpp \
workloads/NeonConstantWorkload.cpp \
workloads/NeonConvertFp16ToFp32Workload.cpp \
workloads/NeonConvertFp32ToFp16Workload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index adb64cba9b..86fc457130 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -183,23 +183,23 @@ static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
{
- NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
+ NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NhwcWorkload)
{
- NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
+ NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
{
- NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
+ NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
{
- NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
+ NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NHWC);
}
template <typename armnn::DataType DataType>
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 9961397445..7b0251ce04 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -8,8 +8,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonActivationWorkload.hpp
NeonAdditionWorkload.cpp
NeonAdditionWorkload.hpp
- NeonBatchNormalizationFloatWorkload.cpp
- NeonBatchNormalizationFloatWorkload.hpp
+ NeonBatchNormalizationWorkload.cpp
+ NeonBatchNormalizationWorkload.hpp
NeonConstantWorkload.cpp
NeonConstantWorkload.hpp
NeonConvertFp16ToFp32Workload.cpp
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index a8181f66d9..44d5035431 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "NeonBatchNormalizationFloatWorkload.hpp"
+#include "NeonBatchNormalizationWorkload.hpp"
#include <backendsCommon/CpuTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/ArmNN.hpp>
@@ -43,11 +43,11 @@ arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
descriptor.m_Eps);
}
-NeonBatchNormalizationFloatWorkload::NeonBatchNormalizationFloatWorkload(
+NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info)
- : FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
+ : BaseWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonBatchNormalizationFloatWorkload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1);
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -87,13 +87,13 @@ NeonBatchNormalizationFloatWorkload::NeonBatchNormalizationFloatWorkload(
FreeUnusedTensors();
}
-void NeonBatchNormalizationFloatWorkload::Execute() const
+void NeonBatchNormalizationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonBatchNormalizationFloatWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonBatchNormalizationWorkload_Execute");
m_Layer.run();
}
-void NeonBatchNormalizationFloatWorkload::FreeUnusedTensors()
+void NeonBatchNormalizationWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_Mean);
FreeTensorIfUnused(m_Variance);
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.hpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
index a6289bd900..52e4db7c90 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.hpp
@@ -18,11 +18,11 @@ arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor);
-class NeonBatchNormalizationFloatWorkload : public FloatWorkload<BatchNormalizationQueueDescriptor>
+class NeonBatchNormalizationWorkload : public BaseWorkload<BatchNormalizationQueueDescriptor>
{
public:
- NeonBatchNormalizationFloatWorkload(const BatchNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info);
+ NeonBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
virtual void Execute() const override;
private:
@@ -38,5 +38,3 @@ private:
} //namespace armnn
-
-
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 82142f2aa1..1f08d039ae 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -6,7 +6,7 @@
#pragma once
#include "NeonActivationWorkload.hpp"
#include "NeonAdditionWorkload.hpp"
-#include "NeonBatchNormalizationFloatWorkload.hpp"
+#include "NeonBatchNormalizationWorkload.hpp"
#include "NeonConstantWorkload.hpp"
#include "NeonConvertFp16ToFp32Workload.hpp"
#include "NeonConvertFp32ToFp16Workload.hpp"