author     arovir01 <Aron.Virginas-Tar@arm.com>          2018-08-31 15:26:35 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>     2018-09-17 17:21:25 +0100
commit     9e53a35b66b1ec7ceee7c712380a13596175b83b (patch)
tree       d40bf9f27c799184324b6ab91cbb1a546fc4012e /src/armnn/backends/NeonWorkloads
parent     5540d2f379b15503269d1b9b5fbe8fbafd160d2e (diff)
download   armnn-9e53a35b66b1ec7ceee7c712380a13596175b83b.tar.gz
IVGCVSW-1784: Rename float32 workloads for ACL
Change-Id: I98bdfe9cb12c663d1d5cfa456e2cc967d70ab22b
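
For context (not part of the patch text): the "32" suffix is dropped because these workloads are not Float32-only. They derive from FloatWorkload<QueueDescriptor>, and the templated ones are instantiated for both DataType::Float16 and DataType::Float32, as the class declarations in the diff below show (e.g. NeonConstantFloatWorkload). Below is a minimal standalone sketch of the before/after naming pattern; the Arm NN types are stubbed out here so it compiles on its own, and the names are taken from the files touched by this patch.

// Minimal sketch, assuming stub stand-ins for the Arm NN types this patch touches.
#include <iostream>

namespace armnn
{
enum class DataType { Float16, Float32 };

struct ActivationQueueDescriptor {};
struct WorkloadInfo {};

// Stand-in for armnn::FloatWorkload<QueueDescriptor>: a workload base that accepts
// both Float16 and Float32 tensors, which is why the "Float32" suffix was misleading.
template <typename QueueDescriptor>
class FloatWorkload
{
public:
    FloatWorkload(const QueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) {}
    virtual ~FloatWorkload() = default;
    virtual void Execute() const = 0;
};

// Old name: NeonActivationFloat32Workload. New name, as introduced by this patch:
class NeonActivationFloatWorkload : public FloatWorkload<ActivationQueueDescriptor>
{
public:
    using FloatWorkload<ActivationQueueDescriptor>::FloatWorkload;
    void Execute() const override
    {
        // The real workload runs an ACL activation layer; this stub only echoes
        // the renamed profiling-event string used in the diff below.
        std::cout << "NeonActivationFloatWorkload_Execute\n";
    }
};
} // namespace armnn

int main()
{
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    armnn::NeonActivationFloatWorkload workload(descriptor, info);
    workload.Execute();
    return 0;
}

The same rename is applied mechanically to every workload listed in the diffstat below, including the strings passed to ValidateInputsOutputs and ARMNN_SCOPED_PROFILING_EVENT_NEON.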
Diffstat (limited to 'src/armnn/backends/NeonWorkloads')
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonActivationFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp) | 12
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonActivationFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.hpp) | 4
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonAdditionFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp) | 12
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonAdditionFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp) | 4
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp) | 12
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConstantFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConstantFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.hpp) | 2
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp) | 12
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp) | 12
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonFloorFloatWorkload.cpp | 30
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonFloorFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.hpp) | 4
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp) | 12
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp) | 10
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonLstmFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonLstmFloat32Workload.cpp) | 10
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonLstmFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonLstmFloat32Workload.hpp) | 4
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonMergerFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonMergerFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.hpp) | 2
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonMultiplicationFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp) | 12
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonMultiplicationFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp) | 4
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp) | 10
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp | 27
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonPooling2dFloatWorkload.cpp | 27
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonPooling2dFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp | 32
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonReshapeFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp) | 18
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonReshapeFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp) | 4
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSoftmaxFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp) | 10
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSoftmaxFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.cpp (renamed from src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp) | 6
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.hpp (renamed from src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp) | 2
36 files changed, 177 insertions, 177 deletions
diff --git a/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonActivationFloatWorkload.cpp
index 711bfceeaf..e3524dced6 100644
--- a/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonActivationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonActivationFloat32Workload.hpp"
+#include "NeonActivationFloatWorkload.hpp"
#include "backends/ArmComputeUtils.hpp"
@@ -32,11 +32,11 @@ arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
activationLayerInfo);
}
-NeonActivationFloat32Workload::NeonActivationFloat32Workload(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+NeonActivationFloatWorkload::NeonActivationFloatWorkload(const ActivationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
: FloatWorkload<ActivationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonActivationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonActivationFloatWorkload", 1, 1);
const arm_compute::ActivationLayerInfo activationLayerInfo =
ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);
@@ -47,9 +47,9 @@ NeonActivationFloat32Workload::NeonActivationFloat32Workload(const ActivationQue
m_ActivationLayer.configure(&input, &output, activationLayerInfo);
}
-void NeonActivationFloat32Workload::Execute() const
+void NeonActivationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonActivationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonActivationFloatWorkload_Execute");
m_ActivationLayer.run();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonActivationFloatWorkload.hpp
index 0d26b3b39f..c9f4bab5f0 100644
--- a/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonActivationFloatWorkload.hpp
@@ -14,10 +14,10 @@ arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor);
-class NeonActivationFloat32Workload : public FloatWorkload<ActivationQueueDescriptor>
+class NeonActivationFloatWorkload : public FloatWorkload<ActivationQueueDescriptor>
{
public:
- NeonActivationFloat32Workload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonActivationFloatWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonAdditionFloatWorkload.cpp
index f26e42aff9..1c1e65c206 100644
--- a/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonAdditionFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonAdditionFloat32Workload.hpp"
+#include "NeonAdditionFloatWorkload.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
#include "backends/CpuTensorHandle.hpp"
@@ -25,11 +25,11 @@ arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo& input0,
}
-NeonAdditionFloat32Workload::NeonAdditionFloat32Workload(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+NeonAdditionFloatWorkload::NeonAdditionFloatWorkload(const AdditionQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
: FloatWorkload<AdditionQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonAdditionFloat32Workload", 2, 1);
+ m_Data.ValidateInputsOutputs("NeonAdditionFloatWorkload", 2, 1);
arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
@@ -38,9 +38,9 @@ NeonAdditionFloat32Workload::NeonAdditionFloat32Workload(const AdditionQueueDesc
m_AddLayer.configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
}
-void NeonAdditionFloat32Workload::Execute() const
+void NeonAdditionFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonAdditionFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonAdditionFloatWorkload_Execute");
m_AddLayer.run();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonAdditionFloatWorkload.hpp
index dae66bb69d..efab230925 100644
--- a/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonAdditionFloatWorkload.hpp
@@ -14,10 +14,10 @@ arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output);
-class NeonAdditionFloat32Workload : public FloatWorkload<AdditionQueueDescriptor>
+class NeonAdditionFloatWorkload : public FloatWorkload<AdditionQueueDescriptor>
{
public:
- NeonAdditionFloat32Workload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonAdditionFloatWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloatWorkload.cpp
index ca5c8202cd..f24ff796cf 100644
--- a/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonBatchNormalizationFloat32Workload.hpp"
+#include "NeonBatchNormalizationFloatWorkload.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
#include "../../../../include/armnn/ArmNN.hpp"
@@ -37,11 +37,11 @@ arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
descriptor.m_Eps);
}
-NeonBatchNormalizationFloat32Workload::NeonBatchNormalizationFloat32Workload(
+NeonBatchNormalizationFloatWorkload::NeonBatchNormalizationFloatWorkload(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info)
: FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonBatchNormalizationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonBatchNormalizationFloatWorkload", 1, 1);
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -77,13 +77,13 @@ NeonBatchNormalizationFloat32Workload::NeonBatchNormalizationFloat32Workload(
FreeUnusedTensors();
}
-void NeonBatchNormalizationFloat32Workload::Execute() const
+void NeonBatchNormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonBatchNormalizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonBatchNormalizationFloatWorkload_Execute");
m_Layer.run();
}
-void NeonBatchNormalizationFloat32Workload::FreeUnusedTensors()
+void NeonBatchNormalizationFloatWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_Mean);
FreeTensorIfUnused(m_Variance);
diff --git a/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloatWorkload.hpp
index 5eb5601f26..2844c650ca 100644
--- a/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloatWorkload.hpp
@@ -18,11 +18,11 @@ arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor);
-class NeonBatchNormalizationFloat32Workload : public FloatWorkload<BatchNormalizationQueueDescriptor>
+class NeonBatchNormalizationFloatWorkload : public FloatWorkload<BatchNormalizationQueueDescriptor>
{
public:
- NeonBatchNormalizationFloat32Workload(const BatchNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info);
+ NeonBatchNormalizationFloatWorkload(const BatchNormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConstantFloatWorkload.cpp
index 4e5d570a8e..49749a8b08 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConstantFloatWorkload.cpp
@@ -3,14 +3,14 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonConstantFloat32Workload.hpp"
+#include "NeonConstantFloatWorkload.hpp"
namespace armnn
{
-void NeonConstantFloat32Workload::Execute() const
+void NeonConstantFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConstantFloatWorkload_Execute");
NeonBaseConstantWorkload::Execute();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConstantFloatWorkload.hpp
index 050954df24..8ae0add2c8 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConstantFloatWorkload.hpp
@@ -10,7 +10,7 @@
namespace armnn
{
-class NeonConstantFloat32Workload : public NeonBaseConstantWorkload<DataType::Float16, DataType::Float32>
+class NeonConstantFloatWorkload : public NeonBaseConstantWorkload<DataType::Float16, DataType::Float32>
{
public:
using NeonBaseConstantWorkload<DataType::Float16, DataType::Float32>::NeonBaseConstantWorkload;
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloatWorkload.cpp
index 18ec6ca2e7..8037cb638f 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonConvolution2dFloat32Workload.hpp"
+#include "NeonConvolution2dFloatWorkload.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
#include "backends/NeonLayerSupport.hpp"
@@ -12,7 +12,7 @@ namespace armnn
{
using namespace armcomputetensorutils;
-NeonConvolution2dFloat32Workload::NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor,
+NeonConvolution2dFloatWorkload::NeonConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: NeonConvolution2dBaseWorkload(descriptor, info, memoryManager)
{
@@ -25,15 +25,15 @@ NeonConvolution2dFloat32Workload::NeonConvolution2dFloat32Workload(const Convolu
FreeUnusedTensors();
}
-void NeonConvolution2dFloat32Workload::Execute() const
+void NeonConvolution2dFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvolution2dFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvolution2dFloatWorkload_Execute");
m_ConvolutionLayer->run();
}
-void NeonConvolution2dFloat32Workload::ValidateData() const
+void NeonConvolution2dFloatWorkload::ValidateData() const
{
- m_Data.ValidateInputsOutputs("NeonConvolution2dFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonConvolution2dFloatWorkload", 1, 1);
}
} //namespace armnn
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloatWorkload.hpp
index 0bb8d69d94..1d08118b61 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloatWorkload.hpp
@@ -15,11 +15,11 @@
namespace armnn
{
-class NeonConvolution2dFloat32Workload : public NeonConvolution2dBaseWorkload<DataType::Float16, DataType::Float32>
+class NeonConvolution2dFloatWorkload : public NeonConvolution2dBaseWorkload<DataType::Float16, DataType::Float32>
{
public:
- NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ NeonConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
void Execute() const override;
void ValidateData() const override;
diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp
index f94cd903b6..96ca938242 100644
--- a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonDepthwiseConvolutionFloat32Workload.hpp"
+#include "NeonDepthwiseConvolutionFloatWorkload.hpp"
#include "backends/NeonLayerSupport.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
@@ -13,7 +13,7 @@ namespace armnn
{
using namespace armcomputetensorutils;
-NeonDepthwiseConvolutionFloat32Workload::NeonDepthwiseConvolutionFloat32Workload(
+NeonDepthwiseConvolutionFloatWorkload::NeonDepthwiseConvolutionFloatWorkload(
const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info)
: FloatWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
@@ -37,7 +37,7 @@ NeonDepthwiseConvolutionFloat32Workload::NeonDepthwiseConvolutionFloat32Workload
m_Data.m_Parameters.m_PadBottom,
arm_compute::DimensionRoundingType::FLOOR);
- m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionFloatWorkload", 1, 1);
arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -77,15 +77,15 @@ NeonDepthwiseConvolutionFloat32Workload::NeonDepthwiseConvolutionFloat32Workload
FreeUnusedTensors();
}
-void NeonDepthwiseConvolutionFloat32Workload::Execute() const
+void NeonDepthwiseConvolutionFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionFloatWorkload_Execute");
BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
-void NeonDepthwiseConvolutionFloat32Workload::FreeUnusedTensors()
+void NeonDepthwiseConvolutionFloatWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_KernelTensor);
FreeTensorIfUnused(m_BiasTensor);
diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.hpp
index ece9f1877b..670182eef0 100644
--- a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.hpp
@@ -10,11 +10,11 @@
namespace armnn
{
-class NeonDepthwiseConvolutionFloat32Workload : public FloatWorkload<DepthwiseConvolution2dQueueDescriptor>
+class NeonDepthwiseConvolutionFloatWorkload : public FloatWorkload<DepthwiseConvolution2dQueueDescriptor>
{
public:
- NeonDepthwiseConvolutionFloat32Workload(const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info);
+ NeonDepthwiseConvolutionFloatWorkload(const DepthwiseConvolution2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonFloorFloatWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonFloorFloatWorkload.cpp
new file mode 100644
index 0000000000..4d2cb029f5
--- /dev/null
+++ b/src/armnn/backends/NeonWorkloads/NeonFloorFloatWorkload.cpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "NeonFloorFloatWorkload.hpp"
+
+namespace armnn
+{
+NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : FloatWorkload<FloorQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1);
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_Layer.configure(&input, &output);
+}
+
+void NeonFloorFloatWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFloorFloatWorkload_Execute");
+ m_Layer.run();
+}
+} //namespace armnn
+
+
+
diff --git a/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonFloorFloatWorkload.hpp
index 56680f1e39..1443d9a613 100644
--- a/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonFloorFloatWorkload.hpp
@@ -10,10 +10,10 @@
namespace armnn
{
-class NeonFloorFloat32Workload : public FloatWorkload<FloorQueueDescriptor>
+class NeonFloorFloatWorkload : public FloatWorkload<FloorQueueDescriptor>
{
public:
- NeonFloorFloat32Workload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonFloorFloatWorkload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloatWorkload.cpp
index c3af41e20d..098d84bf0c 100644
--- a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonFullyConnectedFloat32Workload.hpp"
+#include "NeonFullyConnectedFloatWorkload.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
#include "backends/ArmComputeUtils.hpp"
@@ -42,12 +42,12 @@ arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
fullyConnectedLayerInfo);
}
-NeonFullyConnectedFloat32Workload::NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor,
+NeonFullyConnectedFloatWorkload::NeonFullyConnectedFloatWorkload(const FullyConnectedQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
, m_FullyConnectedLayer(memoryManager)
{
- m_Data.ValidateInputsOutputs("NeonFullyConnectedFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonFullyConnectedFloatWorkload", 1, 1);
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -80,13 +80,13 @@ NeonFullyConnectedFloat32Workload::NeonFullyConnectedFloat32Workload(const Fully
FreeUnusedTensors();
}
-void NeonFullyConnectedFloat32Workload::Execute() const
+void NeonFullyConnectedFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFullyConnectedFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFullyConnectedFloatWorkload_Execute");
m_FullyConnectedLayer.run();
}
-void NeonFullyConnectedFloat32Workload::FreeUnusedTensors()
+void NeonFullyConnectedFloatWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_WeightsTensor);
FreeTensorIfUnused(m_BiasesTensor);
diff --git a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloatWorkload.hpp
index 684b5e0753..1f4464f0e1 100644
--- a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloatWorkload.hpp
@@ -20,11 +20,11 @@ arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor);
-class NeonFullyConnectedFloat32Workload : public FloatWorkload<FullyConnectedQueueDescriptor>
+class NeonFullyConnectedFloatWorkload : public FloatWorkload<FullyConnectedQueueDescriptor>
{
public:
- NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ NeonFullyConnectedFloatWorkload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloatWorkload.cpp
index a3ae33f41f..bf0ef01349 100644
--- a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonL2NormalizationFloat32Workload.hpp"
+#include "NeonL2NormalizationFloatWorkload.hpp"
#include "backends/ArmComputeUtils.hpp"
namespace armnn
@@ -21,21 +21,21 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}
-NeonL2NormalizationFloat32Workload::NeonL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor,
+NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<L2NormalizationQueueDescriptor>(descriptor, info)
, m_Layer(memoryManager)
{
- m_Data.ValidateInputsOutputs("NeonL2NormalizationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1);
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_Layer.configure(&input, &output, CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0]));
}
-void NeonL2NormalizationFloat32Workload::Execute() const
+void NeonL2NormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonL2NormalizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonL2NormalizationFloatWorkload_Execute");
m_Layer.run();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloatWorkload.hpp
index c3fcde5a57..5edff075f6 100644
--- a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloatWorkload.hpp
@@ -17,11 +17,11 @@ namespace armnn
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
const TensorInfo& output);
-class NeonL2NormalizationFloat32Workload : public FloatWorkload<L2NormalizationQueueDescriptor>
+class NeonL2NormalizationFloatWorkload : public FloatWorkload<L2NormalizationQueueDescriptor>
{
public:
- NeonL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonLstmFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonLstmFloatWorkload.cpp
index ba1369e179..8159e9feb4 100644
--- a/src/armnn/backends/NeonWorkloads/NeonLstmFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonLstmFloatWorkload.cpp
@@ -3,18 +3,18 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonLstmFloat32Workload.hpp"
+#include "NeonLstmFloatWorkload.hpp"
namespace armnn
{
-NeonLstmFloat32Workload::NeonLstmFloat32Workload(const LstmQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
: FloatWorkload<LstmQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonLstmFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonLstmFloatWorkload", 1, 1);
}
-void NeonLstmFloat32Workload::Execute() const
+void NeonLstmFloatWorkload::Execute() const
{
throw armnn::Exception("No implementation of Lstm in the Neon backend!");
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonLstmFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonLstmFloatWorkload.hpp
index 78ee1da341..fc06d55e6d 100644
--- a/src/armnn/backends/NeonWorkloads/NeonLstmFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonLstmFloatWorkload.hpp
@@ -10,10 +10,10 @@
namespace armnn
{
-class NeonLstmFloat32Workload : public FloatWorkload<LstmQueueDescriptor>
+class NeonLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
{
public:
- NeonLstmFloat32Workload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
};
diff --git a/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonMergerFloatWorkload.cpp
index 30dd283620..02caca91bc 100644
--- a/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonMergerFloatWorkload.cpp
@@ -3,14 +3,14 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonMergerFloat32Workload.hpp"
+#include "NeonMergerFloatWorkload.hpp"
namespace armnn
{
-void NeonMergerFloat32Workload::Execute() const
+void NeonMergerFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerFloatWorkload_Execute");
NeonBaseMergerWorkload::Execute();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonMergerFloatWorkload.hpp
index 7b8ee9881f..c9f8e66e38 100644
--- a/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonMergerFloatWorkload.hpp
@@ -10,7 +10,7 @@
namespace armnn
{
-class NeonMergerFloat32Workload : public NeonBaseMergerWorkload<DataType::Float16, DataType::Float32>
+class NeonMergerFloatWorkload : public NeonBaseMergerWorkload<DataType::Float16, DataType::Float32>
{
public:
using NeonBaseMergerWorkload<DataType::Float16, DataType::Float32>::NeonBaseMergerWorkload;
diff --git a/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloatWorkload.cpp
index a8a3cd77b4..ca38a24b09 100644
--- a/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonMultiplicationFloat32Workload.hpp"
+#include "NeonMultiplicationFloatWorkload.hpp"
namespace armnn
@@ -28,11 +28,11 @@ arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo& input0,
arm_compute::RoundingPolicy::TO_ZERO);
}
-NeonMultiplicationFloat32Workload::NeonMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+NeonMultiplicationFloatWorkload::NeonMultiplicationFloatWorkload(const MultiplicationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
: FloatWorkload<MultiplicationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonMultiplicationFloat32Workload", 2, 1);
+ m_Data.ValidateInputsOutputs("NeonMultiplicationFloatWorkload", 2, 1);
arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
@@ -49,9 +49,9 @@ NeonMultiplicationFloat32Workload::NeonMultiplicationFloat32Workload(const Multi
arm_compute::RoundingPolicy::TO_ZERO);
}
-void NeonMultiplicationFloat32Workload::Execute() const
+void NeonMultiplicationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMultiplicationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMultiplicationFloatWorkload_Execute");
m_PixelWiseMultiplication.run();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloatWorkload.hpp
index 62e84a2e07..eb0f7741f0 100644
--- a/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloatWorkload.hpp
@@ -13,10 +13,10 @@ arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output);
-class NeonMultiplicationFloat32Workload : public FloatWorkload<MultiplicationQueueDescriptor>
+class NeonMultiplicationFloatWorkload : public FloatWorkload<MultiplicationQueueDescriptor>
{
public:
- NeonMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonMultiplicationFloatWorkload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp
index 20936a2760..8c2a87d8bc 100644
--- a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonNormalizationFloat32Workload.hpp"
+#include "NeonNormalizationFloatWorkload.hpp"
#include "backends/NeonLayerSupport.hpp"
#include "backends/ArmComputeUtils.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
@@ -24,13 +24,13 @@ arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}
-NeonNormalizationFloat32Workload::NeonNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor,
+NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info,
std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<NormalizationQueueDescriptor>(descriptor, info)
, m_NormalizationLayer(memoryManager)
{
- m_Data.ValidateInputsOutputs("NeonNormalizationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
std::string reasonIfUnsupported;
if (!IsNeonNormalizationDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters))
{
@@ -61,9 +61,9 @@ NeonNormalizationFloat32Workload::NeonNormalizationFloat32Workload(const Normali
m_NormalizationLayer.configure(&input, &output, normalizationInfo);
}
-void NeonNormalizationFloat32Workload::Execute() const
+void NeonNormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNormalizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNormalizationFloatWorkload_Execute");
m_NormalizationLayer.run();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.hpp
index 8f0823454b..8e8cc40c5a 100644
--- a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.hpp
@@ -16,11 +16,11 @@ arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor);
-class NeonNormalizationFloat32Workload : public FloatWorkload<NormalizationQueueDescriptor>
+class NeonNormalizationFloatWorkload : public FloatWorkload<NormalizationQueueDescriptor>
{
public:
- NeonNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp
deleted file mode 100644
index cb690c51b8..0000000000
--- a/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#include "NeonPooling2dFloat32Workload.hpp"
-
-
-
-namespace armnn
-{
-
-NeonPooling2dFloat32Workload::NeonPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : NeonPooling2dBaseWorkload<armnn::DataType::Float16, armnn::DataType::Float32>(descriptor, info,
- "NeonPooling2dFloat32Workload")
-{
-}
-
-void NeonPooling2dFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonPooling2dFloat32Workload_Execute");
- m_PoolingLayer.run();
-}
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dFloatWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloatWorkload.cpp
new file mode 100644
index 0000000000..90e09dbaf4
--- /dev/null
+++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloatWorkload.cpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "NeonPooling2dFloatWorkload.hpp"
+
+
+
+namespace armnn
+{
+
+NeonPooling2dFloatWorkload::NeonPooling2dFloatWorkload(const Pooling2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : NeonPooling2dBaseWorkload<armnn::DataType::Float16, armnn::DataType::Float32>(descriptor, info,
+ "NeonPooling2dFloatWorkload")
+{
+}
+
+void NeonPooling2dFloatWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonPooling2dFloatWorkload_Execute");
+ m_PoolingLayer.run();
+}
+
+} //namespace armnn
+
diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloatWorkload.hpp
index 36c4e7edf1..ba4e9cab6d 100644
--- a/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloatWorkload.hpp
@@ -11,11 +11,11 @@
namespace armnn
{
-class NeonPooling2dFloat32Workload : public NeonPooling2dBaseWorkload<armnn::DataType::Float16,
- armnn::DataType::Float32>
+class NeonPooling2dFloatWorkload : public NeonPooling2dBaseWorkload<armnn::DataType::Float16,
+ armnn::DataType::Float32>
{
public:
- NeonPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonPooling2dFloatWorkload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
};
diff --git a/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp
deleted file mode 100644
index 93f6eb8ef5..0000000000
--- a/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#include "NeonReshapeFloat32Workload.hpp"
-
-
-
-namespace armnn
-{
-
-NeonReshapeFloat32Workload::NeonReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<ReshapeQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("NeonReshapeFloat32Workload", 1, 1);
-
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input, &output);
-}
-
-void NeonReshapeFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeFloat32Workload_Execute");
- m_Layer.run();
-}
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonReshapeFloatWorkload.cpp
index c43cfa9c46..ef229b252f 100644
--- a/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonReshapeFloatWorkload.cpp
@@ -3,15 +3,18 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonFloorFloat32Workload.hpp"
+#include "NeonReshapeFloatWorkload.hpp"
+
+
namespace armnn
{
-NeonFloorFloat32Workload::NeonFloorFloat32Workload(const FloorQueueDescriptor& descriptor,
+
+NeonReshapeFloatWorkload::NeonReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info)
- : FloatWorkload<FloorQueueDescriptor>(descriptor, info)
+ : FloatWorkload<ReshapeQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("NeonFloorFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonReshapeFloatWorkload", 1, 1);
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -19,12 +22,11 @@ NeonFloorFloat32Workload::NeonFloorFloat32Workload(const FloorQueueDescriptor& d
m_Layer.configure(&input, &output);
}
-void NeonFloorFloat32Workload::Execute() const
+void NeonReshapeFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFloorFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeFloatWorkload_Execute");
m_Layer.run();
}
-} //namespace armnn
-
+} //namespace armnn
diff --git a/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonReshapeFloatWorkload.hpp
index 3e5cca1b9e..d32d779962 100644
--- a/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonReshapeFloatWorkload.hpp
@@ -10,10 +10,10 @@
namespace armnn
{
-class NeonReshapeFloat32Workload : public FloatWorkload<ReshapeQueueDescriptor>
+class NeonReshapeFloatWorkload : public FloatWorkload<ReshapeQueueDescriptor>
{
public:
- NeonReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloatWorkload.cpp
index 027b508ad5..981e63ea55 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloatWorkload.cpp
@@ -3,17 +3,17 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonSoftmaxFloat32Workload.hpp"
+#include "NeonSoftmaxFloatWorkload.hpp"
namespace armnn
{
-NeonSoftmaxFloat32Workload::NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor,
+NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
, m_SoftmaxLayer(memoryManager)
{
- m_Data.ValidateInputsOutputs("NeonSoftmaxFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
// The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
@@ -22,9 +22,9 @@ NeonSoftmaxFloat32Workload::NeonSoftmaxFloat32Workload(const SoftmaxQueueDescrip
m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
}
-void NeonSoftmaxFloat32Workload::Execute() const
+void NeonSoftmaxFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloatWorkload_Execute");
m_SoftmaxLayer.run();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloatWorkload.hpp
index 3656a26a3c..52dbded5dd 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloatWorkload.hpp
@@ -14,11 +14,11 @@
namespace armnn
{
-class NeonSoftmaxFloat32Workload : public FloatWorkload<SoftmaxQueueDescriptor>
+class NeonSoftmaxFloatWorkload : public FloatWorkload<SoftmaxQueueDescriptor>
{
public:
- NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.cpp
index 996fc15adb..06e300cd5a 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.cpp
@@ -3,14 +3,14 @@
// See LICENSE file in the project root for full license information.
//
-#include "NeonSplitterFloat32Workload.hpp"
+#include "NeonSplitterFloatWorkload.hpp"
namespace armnn
{
-void NeonSplitterFloat32Workload::Execute() const
+void NeonSplitterFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSplitterFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSplitterFloatWorkload_Execute");
NeonBaseSplitterWorkload::Execute();
}
diff --git a/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.hpp
index 9f6dc75499..5d6b328413 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.hpp
@@ -10,7 +10,7 @@
namespace armnn
{
-class NeonSplitterFloat32Workload : public NeonBaseSplitterWorkload<DataType::Float16, DataType::Float32>
+class NeonSplitterFloatWorkload : public NeonBaseSplitterWorkload<DataType::Float16, DataType::Float32>
{
public:
using NeonBaseSplitterWorkload<DataType::Float16, DataType::Float32>::NeonBaseSplitterWorkload;