Diffstat (limited to 'src/armnn/backends/ClWorkloads')
-rw-r--r-- src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp) | 10
-rw-r--r-- src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp) | 6
-rw-r--r-- src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.hpp) | 2
-rw-r--r-- src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp) | 12
-rw-r--r-- src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp) | 6
-rw-r--r-- src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.hpp) | 2
-rw-r--r-- src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp) | 8
-rw-r--r-- src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp) | 6
-rw-r--r-- src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp) | 8
-rw-r--r-- src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp) | 8
-rw-r--r-- src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp) | 10
-rw-r--r-- src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp) | 12
-rw-r--r-- src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp) | 8
-rw-r--r-- src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp) | 12
-rw-r--r-- src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClLstmFloat32Workload.hpp | 67
-rw-r--r-- src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClLstmFloat32Workload.cpp) | 48
-rw-r--r-- src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp | 67
-rw-r--r-- src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp) | 6
-rw-r--r-- src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.hpp) | 2
-rw-r--r-- src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp) | 10
-rw-r--r-- src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp) | 12
-rw-r--r-- src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.hpp) | 7
-rw-r--r-- src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp) | 10
-rw-r--r-- src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp) | 10
-rw-r--r-- src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp) | 10
-rw-r--r-- src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp) | 10
-rw-r--r-- src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp) | 4
-rw-r--r-- src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.cpp (renamed from src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp) | 6
-rw-r--r-- src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.hpp (renamed from src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.hpp) | 2
37 files changed, 206 insertions, 207 deletions
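
Note on the rename: every class touched below derives from a base parameterised over both DataType::Float16 and DataType::Float32 (see e.g. ClAdditionBaseWorkload<DataType::Float16, DataType::Float32> in the hunks that follow), so the old Float32 suffix under-described what the workloads accept. A minimal, self-contained sketch of that dual-precision shape; the type names here are illustrative assumptions, not the Arm NN definitions:

// Sketch (assumption): models the Float16/Float32 template arguments seen on
// the base classes in this diff. Compiles standalone with C++17.
#include <iostream>

enum class DataType { Float16, Float32 };

// A workload parameterised over every data type it supports.
template <DataType... SupportedTypes>
struct TypedWorkload
{
    static constexpr bool Supports(DataType type)
    {
        return ((type == SupportedTypes) || ...);
    }
};

// The renamed "Float" (not "Float32") workloads derive from bases of this shape.
using FloatWorkloadBase = TypedWorkload<DataType::Float16, DataType::Float32>;

int main()
{
    std::cout << std::boolalpha
              << FloatWorkloadBase::Supports(DataType::Float16) << '\n'   // true
              << FloatWorkloadBase::Supports(DataType::Float32) << '\n';  // true
}
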
diff --git a/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp
index f072549cbc..1e5cf0c722 100644
--- a/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClActivationFloat32Workload.hpp"
+#include "ClActivationFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/ArmComputeUtils.hpp"
@@ -31,11 +31,11 @@ arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
activationLayerInfo);
}
-ClActivationFloat32Workload::ClActivationFloat32Workload(const ActivationQueueDescriptor& descriptor,
+ClActivationFloatWorkload::ClActivationFloatWorkload(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info)
: FloatWorkload<ActivationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("ClActivationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClActivationFloatWorkload", 1, 1);
const arm_compute::ActivationLayerInfo activationLayerInfo =
ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);
@@ -45,9 +45,9 @@ ClActivationFloat32Workload::ClActivationFloat32Workload(const ActivationQueueDe
m_ActivationLayer.configure(&input, &output, activationLayerInfo);
}
-void ClActivationFloat32Workload::Execute() const
+void ClActivationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationFloatWorkload_Execute");
m_ActivationLayer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp
index 9fbfe95856..56367ce14f 100644
--- a/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp
@@ -14,10 +14,10 @@ arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
const ActivationDescriptor& descriptor);
// Activation layer execution.
-class ClActivationFloat32Workload : public FloatWorkload<ActivationQueueDescriptor>
+class ClActivationFloatWorkload : public FloatWorkload<ActivationQueueDescriptor>
{
public:
- ClActivationFloat32Workload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClActivationFloatWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
private:
diff --git a/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.cpp
index b69593f5f5..089b84a33f 100644
--- a/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClAdditionFloat32Workload.hpp"
+#include "ClAdditionFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
@@ -13,9 +13,9 @@ namespace armnn
{
using namespace armcomputetensorutils;
-void ClAdditionFloat32Workload::Execute() const
+void ClAdditionFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionFloatWorkload_Execute");
ClAdditionBaseWorkload::Execute();
}
diff --git a/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.hpp
index 7eac485cfe..03df9cf0a5 100644
--- a/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.hpp
@@ -10,7 +10,7 @@
namespace armnn
{
-class ClAdditionFloat32Workload : public ClAdditionBaseWorkload<DataType::Float16, DataType::Float32>
+class ClAdditionFloatWorkload : public ClAdditionBaseWorkload<DataType::Float16, DataType::Float32>
{
public:
using ClAdditionBaseWorkload<DataType::Float16, DataType::Float32>::ClAdditionBaseWorkload;
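
The using-declaration above pulls the base-class constructors into the renamed class, since ClAdditionFloatWorkload adds no members of its own. A self-contained sketch of that idiom, with illustrative names rather than the Arm NN ones:

// Sketch (assumption): the constructor-inheritance idiom used by the renamed
// workloads. Compiles standalone.
#include <string>

struct QueueDescriptor
{
    std::string layerName;
};

struct BaseWorkload
{
    explicit BaseWorkload(const QueueDescriptor& descriptor) : m_Descriptor(descriptor) {}
    QueueDescriptor m_Descriptor;
};

// Inherit BaseWorkload's constructor instead of restating it.
struct FloatWorkload : BaseWorkload
{
    using BaseWorkload::BaseWorkload;
};

int main()
{
    FloatWorkload workload(QueueDescriptor{"addition"});
    return workload.m_Descriptor.layerName == "addition" ? 0 : 1;
}
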
diff --git a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp
index 1849c5d411..a5d5b82f38 100644
--- a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClBatchNormalizationFloat32Workload.hpp"
+#include "ClBatchNormalizationFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
@@ -37,7 +37,7 @@ arm_compute::Status ClBatchNormalizationValidate(const TensorInfo& input,
desc.m_Eps);
}
-ClBatchNormalizationFloat32Workload::ClBatchNormalizationFloat32Workload(
+ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info)
: FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
{
@@ -53,7 +53,7 @@ ClBatchNormalizationFloat32Workload::ClBatchNormalizationFloat32Workload(
m_Beta = std::make_unique<arm_compute::CLTensor>();
BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());
- m_Data.ValidateInputsOutputs("ClBatchNormalizationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClBatchNormalizationFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -77,13 +77,13 @@ ClBatchNormalizationFloat32Workload::ClBatchNormalizationFloat32Workload(
FreeUnusedTensors();
}
-void ClBatchNormalizationFloat32Workload::Execute() const
+void ClBatchNormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloatWorkload_Execute");
m_Layer.run();
}
-void ClBatchNormalizationFloat32Workload::FreeUnusedTensors()
+void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_Mean);
FreeTensorIfUnused(m_Variance);
diff --git a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp
index a45614a284..edc0d3c802 100644
--- a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp
@@ -18,10 +18,10 @@ arm_compute::Status ClBatchNormalizationValidate(const TensorInfo& input,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& desc);
-class ClBatchNormalizationFloat32Workload : public FloatWorkload<BatchNormalizationQueueDescriptor>
+class ClBatchNormalizationFloatWorkload : public FloatWorkload<BatchNormalizationQueueDescriptor>
{
public:
- ClBatchNormalizationFloat32Workload(const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClBatchNormalizationFloatWorkload(const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
using FloatWorkload<BatchNormalizationQueueDescriptor>::FloatWorkload;
void Execute() const override;
diff --git a/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp
index 58594999a8..842f757ed1 100644
--- a/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp
@@ -3,13 +3,13 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClConstantFloat32Workload.hpp"
+#include "ClConstantFloatWorkload.hpp"
namespace armnn
{
-void ClConstantFloat32Workload::Execute() const
+void ClConstantFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClConstantFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClConstantFloatWorkload_Execute");
ClBaseConstantWorkload::Execute();
}
diff --git a/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.hpp
index 11c3fda8db..52b1be46b1 100644
--- a/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.hpp
@@ -9,7 +9,7 @@
namespace armnn
{
-class ClConstantFloat32Workload : public ClBaseConstantWorkload<DataType::Float16, DataType::Float32>
+class ClConstantFloatWorkload : public ClBaseConstantWorkload<DataType::Float16, DataType::Float32>
{
public:
using ClBaseConstantWorkload<DataType::Float16, DataType::Float32>::ClBaseConstantWorkload;
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp
index 9ac31df5c1..375ace277a 100644
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClConvolution2dFloat32Workload.hpp"
+#include "ClConvolution2dFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
@@ -13,7 +13,7 @@ namespace armnn
{
using namespace armcomputetensorutils;
-ClConvolution2dFloat32Workload::ClConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor,
+ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<Convolution2dQueueDescriptor>(descriptor, info)
, m_ConvolutionLayer(memoryManager)
@@ -63,14 +63,14 @@ ClConvolution2dFloat32Workload::ClConvolution2dFloat32Workload(const Convolution
FreeUnusedTensors();
}
-void ClConvolution2dFloat32Workload::Execute() const
+void ClConvolution2dFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dFloat32Workload_Execute");
m_ConvolutionLayer.run();
}
-void ClConvolution2dFloat32Workload::FreeUnusedTensors()
+void ClConvolution2dFloatWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_KernelTensor);
FreeTensorIfUnused(m_BiasTensor);
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.hpp
index 51c21aec32..b0af0951de 100644
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.hpp
@@ -14,11 +14,11 @@
namespace armnn
{
-class ClConvolution2dFloat32Workload : public FloatWorkload<Convolution2dQueueDescriptor>
+class ClConvolution2dFloatWorkload : public FloatWorkload<Convolution2dQueueDescriptor>
{
public:
- ClConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ ClConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
void Execute() const override;
private:
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.cpp
index 96d97ad4ea..79aef3ee4e 100644
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClDepthwiseConvolutionFloat32Workload.hpp"
+#include "ClDepthwiseConvolutionFloatWorkload.hpp"
#include "backends/ClWorkloadUtils.hpp"
#include "backends/CpuTensorHandle.hpp"
@@ -11,7 +11,7 @@
namespace armnn
{
-ClDepthwiseConvolutionFloat32Workload::ClDepthwiseConvolutionFloat32Workload(
+ClDepthwiseConvolutionFloatWorkload::ClDepthwiseConvolutionFloatWorkload(
const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info)
: ClDepthwiseConvolutionBaseWorkload(descriptor, info)
@@ -27,9 +27,9 @@ ClDepthwiseConvolutionFloat32Workload::ClDepthwiseConvolutionFloat32Workload(
FreeUnusedTensors();
}
-void ClDepthwiseConvolutionFloat32Workload::Execute() const
+void ClDepthwiseConvolutionFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionFloatWorkload_Execute");
BOOST_ASSERT(m_DepthwiseConvolutionLayer);
m_DepthwiseConvolutionLayer->run();
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.hpp
index 669fd928b5..2107a7ec61 100644
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.hpp
@@ -12,12 +12,12 @@
namespace armnn
{
-class ClDepthwiseConvolutionFloat32Workload : public ClDepthwiseConvolutionBaseWorkload<DataType::Float16,
- DataType::Float32>
+class ClDepthwiseConvolutionFloatWorkload : public ClDepthwiseConvolutionBaseWorkload<DataType::Float16,
+ DataType::Float32>
{
public:
- ClDepthwiseConvolutionFloat32Workload(const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info);
+ ClDepthwiseConvolutionFloatWorkload(const DepthwiseConvolution2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
void Execute() const override;
};
diff --git a/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp
index da71c50305..3ec7e753bb 100644
--- a/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp
@@ -3,16 +3,16 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClFloorFloat32Workload.hpp"
+#include "ClFloorFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
namespace armnn
{
-ClFloorFloat32Workload::ClFloorFloat32Workload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info)
+ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info)
: FloatWorkload<FloorQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("ClFloorFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClFloorFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -20,9 +20,9 @@ ClFloorFloat32Workload::ClFloorFloat32Workload(const FloorQueueDescriptor& descr
m_Layer.configure(&input, &output);
}
-void ClFloorFloat32Workload::Execute() const
+void ClFloorFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloatWorkload_Execute");
m_Layer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp
index bd7f3032fc..965ae06542 100644
--- a/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp
@@ -10,10 +10,10 @@
namespace armnn
{
-class ClFloorFloat32Workload : public FloatWorkload<FloorQueueDescriptor>
+class ClFloorFloatWorkload : public FloatWorkload<FloorQueueDescriptor>
{
public:
- ClFloorFloat32Workload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClFloorFloatWorkload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
diff --git a/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.cpp
index 5014dd27ca..9774368597 100644
--- a/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClFullyConnectedFloat32Workload.hpp"
+#include "ClFullyConnectedFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
@@ -42,7 +42,7 @@ arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
fullyConnectedLayerInfo);
}
-ClFullyConnectedFloat32Workload::ClFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor,
+ClFullyConnectedFloatWorkload::ClFullyConnectedFloatWorkload(const FullyConnectedQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
, m_FullyConnectedLayer(memoryManager)
@@ -56,7 +56,7 @@ ClFullyConnectedFloat32Workload::ClFullyConnectedFloat32Workload(const FullyConn
BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
}
- m_Data.ValidateInputsOutputs("ClFullyConnectedFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClFullyConnectedFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -80,13 +80,13 @@ ClFullyConnectedFloat32Workload::ClFullyConnectedFloat32Workload(const FullyConn
FreeUnusedTensors();
}
-void ClFullyConnectedFloat32Workload::Execute() const
+void ClFullyConnectedFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedFloatWorkload_Execute");
m_FullyConnectedLayer.run();
}
-void ClFullyConnectedFloat32Workload::FreeUnusedTensors()
+void ClFullyConnectedFloatWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_WeightsTensor);
FreeTensorIfUnused(m_BiasesTensor);
diff --git a/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.hpp
index f580e580c6..1832e4f957 100644
--- a/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.hpp
@@ -20,12 +20,12 @@ arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor);
-class ClFullyConnectedFloat32Workload : public armnn::FloatWorkload<armnn::FullyConnectedQueueDescriptor>
+class ClFullyConnectedFloatWorkload : public armnn::FloatWorkload<armnn::FullyConnectedQueueDescriptor>
{
public:
- ClFullyConnectedFloat32Workload(const armnn::FullyConnectedQueueDescriptor& descriptor,
- const armnn::WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ ClFullyConnectedFloatWorkload(const armnn::FullyConnectedQueueDescriptor& descriptor,
+ const armnn::WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
using armnn::FloatWorkload<armnn::FullyConnectedQueueDescriptor>::m_Data;
void Execute() const override;
diff --git a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp
index 628e38d3da..c79ed9716c 100644
--- a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClL2NormalizationFloat32Workload.hpp"
+#include "ClL2NormalizationFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeUtils.hpp"
@@ -24,20 +24,20 @@ arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
return arm_compute::CLNormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}
-ClL2NormalizationFloat32Workload::ClL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
: FloatWorkload<L2NormalizationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("ClL2NormalizationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClL2NormalizationFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_Layer.configure(&input, &output, CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0]));
}
-void ClL2NormalizationFloat32Workload::Execute() const
+void ClL2NormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloatWorkload_Execute");
m_Layer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp
index bf898e31f7..a247c68a6b 100644
--- a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp
@@ -13,10 +13,10 @@ namespace armnn
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
const TensorInfo& output);
-class ClL2NormalizationFloat32Workload : public FloatWorkload<L2NormalizationQueueDescriptor>
+class ClL2NormalizationFloatWorkload : public FloatWorkload<L2NormalizationQueueDescriptor>
{
public:
- ClL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
diff --git a/src/armnn/backends/ClWorkloads/ClLstmFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClLstmFloat32Workload.hpp
deleted file mode 100644
index e2358ad10d..0000000000
--- a/src/armnn/backends/ClWorkloads/ClLstmFloat32Workload.hpp
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#pragma once
-
-#include "backends/ClWorkloadUtils.hpp"
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class ClLstmFloat32Workload : public FloatWorkload<LstmQueueDescriptor>
-{
-public:
- ClLstmFloat32Workload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLLSTMLayer m_LstmLayer;
-
- std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputToCellWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputToOutputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToInputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToForgetWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToCellWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToOutputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellToInputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellToForgetWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellToOutputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputGateBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_ForgetGateBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_OutputGateBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_ProjectionWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_ProjectionBiasTensor;
-
- std::unique_ptr<arm_compute::CLTensor> m_ScratchBuffer;
-
- void FreeUnusedTensors();
-};
-
-arm_compute::Status ClLstmFloat32WorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor &descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights);
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClLstmFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp
index db5c303854..bfa439bba9 100644
--- a/src/armnn/backends/ClWorkloads/ClLstmFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClLstmFloat32Workload.hpp"
+#include "ClLstmFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
@@ -14,7 +14,7 @@ namespace armnn
{
using namespace armcomputetensorutils;
-ClLstmFloat32Workload::ClLstmFloat32Workload(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
+ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
: FloatWorkload<LstmQueueDescriptor>(descriptor, info)
{
arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;
@@ -228,31 +228,31 @@ ClLstmFloat32Workload::ClLstmFloat32Workload(const LstmQueueDescriptor &descript
FreeUnusedTensors();
}
-void ClLstmFloat32Workload::Execute() const
+void ClLstmFloatWorkload::Execute() const
{
m_LstmLayer.run();
}
-arm_compute::Status ClLstmFloat32WorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights)
+arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
+ const TensorInfo& output, const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights)
{
arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;
@@ -380,7 +380,7 @@ arm_compute::Status ClLstmFloat32WorkloadValidate(const TensorInfo& input, const
cell_threshold, projection_threshold);
}
-void ClLstmFloat32Workload::FreeUnusedTensors()
+void ClLstmFloatWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_InputToInputWeightsTensor);
FreeTensorIfUnused(m_InputToForgetWeightsTensor);
diff --git a/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp
new file mode 100644
index 0000000000..47473b2fdf
--- /dev/null
+++ b/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp
@@ -0,0 +1,67 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include "backends/ClWorkloadUtils.hpp"
+#include "backends/Workload.hpp"
+#include "backends/WorkloadData.hpp"
+
+namespace armnn
+{
+
+class ClLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
+{
+public:
+ ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
+ void Execute() const override;
+
+private:
+ mutable arm_compute::CLLSTMLayer m_LstmLayer;
+
+ std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_InputToCellWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_InputToOutputWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_RecurrentToInputWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_RecurrentToForgetWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_RecurrentToCellWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_RecurrentToOutputWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_CellToInputWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_CellToForgetWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_CellToOutputWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_InputGateBiasTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_ForgetGateBiasTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_CellBiasTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_OutputGateBiasTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_ProjectionWeightsTensor;
+ std::unique_ptr<arm_compute::CLTensor> m_ProjectionBiasTensor;
+
+ std::unique_ptr<arm_compute::CLTensor> m_ScratchBuffer;
+
+ void FreeUnusedTensors();
+};
+
+arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
+ const TensorInfo& output, const LstmDescriptor &descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights);
+} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp
index 89e7690a36..3d8a183b70 100644
--- a/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp
@@ -3,15 +3,15 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClMergerFloat32Workload.hpp"
+#include "ClMergerFloatWorkload.hpp"
namespace armnn
{
-void ClMergerFloat32Workload::Execute() const
+void ClMergerFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClMergerFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClMergerFloatWorkload_Execute");
ClBaseMergerWorkload::Execute();
}
diff --git a/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.hpp
index 3cafa23c1e..e89f96eb86 100644
--- a/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.hpp
@@ -10,7 +10,7 @@
namespace armnn
{
-class ClMergerFloat32Workload : public ClBaseMergerWorkload<DataType::Float16, DataType::Float32>
+class ClMergerFloatWorkload : public ClBaseMergerWorkload<DataType::Float16, DataType::Float32>
{
public:
using ClBaseMergerWorkload<DataType::Float16, DataType::Float32>::ClBaseMergerWorkload;
diff --git a/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.cpp
index 7aa33146f3..e161a0a8fe 100644
--- a/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClMultiplicationFloat32Workload.hpp"
+#include "ClMultiplicationFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
@@ -30,11 +30,11 @@ arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
}
-ClMultiplicationFloat32Workload::ClMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor,
+ClMultiplicationFloatWorkload::ClMultiplicationFloatWorkload(const MultiplicationQueueDescriptor& descriptor,
const WorkloadInfo& info)
: FloatWorkload<MultiplicationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("ClMultiplicationFloat32Workload", 2, 1);
+ m_Data.ValidateInputsOutputs("ClMultiplicationFloatWorkload", 2, 1);
arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
@@ -48,9 +48,9 @@ ClMultiplicationFloat32Workload::ClMultiplicationFloat32Workload(const Multiplic
arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
}
-void ClMultiplicationFloat32Workload::Execute() const
+void ClMultiplicationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationFloatWorkload_Execute");
// Executes the layer.
m_PixelWiseMultiplication.run();
diff --git a/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp
index 0d6199047d..0da8451378 100644
--- a/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp
@@ -14,10 +14,10 @@ arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output);
-class ClMultiplicationFloat32Workload : public FloatWorkload<MultiplicationQueueDescriptor>
+class ClMultiplicationFloatWorkload : public FloatWorkload<MultiplicationQueueDescriptor>
{
public:
- ClMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClMultiplicationFloatWorkload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info);
using FloatWorkload<MultiplicationQueueDescriptor>::FloatWorkload;
void Execute() const override;
diff --git a/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.cpp
index d23d6e11bd..bd486e6330 100644
--- a/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClNormalizationFloat32Workload.hpp"
+#include "ClNormalizationFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ClLayerSupport.hpp"
@@ -25,11 +25,11 @@ arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo& input, con
return arm_compute::CLNormalizationLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo);
}
-ClNormalizationFloat32Workload::ClNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+ClNormalizationFloatWorkload::ClNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
: FloatWorkload<NormalizationQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("ClNormalizationFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClNormalizationFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -40,9 +40,9 @@ ClNormalizationFloat32Workload::ClNormalizationFloat32Workload(const Normalizati
m_NormalizationLayer.configure(&input, &output, normalizationInfo);
};
-void ClNormalizationFloat32Workload::Execute() const
+void ClNormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloatWorkload_Execute");
m_NormalizationLayer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.hpp
index e8ab0b9a18..4f49e93eab 100644
--- a/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.hpp
@@ -14,15 +14,14 @@ arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor);
-class ClNormalizationFloat32Workload : public FloatWorkload<NormalizationQueueDescriptor>
+class ClNormalizationFloatWorkload : public FloatWorkload<NormalizationQueueDescriptor>
{
public:
- ClNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
private:
mutable arm_compute::CLNormalizationLayer m_NormalizationLayer;
};
-} //namespace armnn
-
+} //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.cpp
index 3a5b8ca526..9979e2fd7b 100644
--- a/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.cpp
@@ -3,20 +3,20 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClPooling2dFloat32Workload.hpp"
+#include "ClPooling2dFloatWorkload.hpp"
namespace armnn
{
-ClPooling2dFloat32Workload::ClPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor,
+ClPooling2dFloatWorkload::ClPooling2dFloatWorkload(const Pooling2dQueueDescriptor& descriptor,
const WorkloadInfo& info)
- : ClPooling2dBaseWorkload<DataType::Float16, DataType::Float32>(descriptor, info, "ClPooling2dFloat32Workload")
+ : ClPooling2dBaseWorkload<DataType::Float16, DataType::Float32>(descriptor, info, "ClPooling2dFloatWorkload")
{
}
-void ClPooling2dFloat32Workload::Execute() const
+void ClPooling2dFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dFloatWorkload_Execute");
m_PoolingLayer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.hpp
index ad189bdb52..0b73abb70c 100644
--- a/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.hpp
@@ -10,10 +10,10 @@
namespace armnn
{
-class ClPooling2dFloat32Workload : public ClPooling2dBaseWorkload<DataType::Float16, DataType::Float32>
+class ClPooling2dFloatWorkload : public ClPooling2dBaseWorkload<DataType::Float16, DataType::Float32>
{
public:
- ClPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClPooling2dFloatWorkload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
};
diff --git a/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.cpp
index 05fba222ac..645544b75d 100644
--- a/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.cpp
@@ -3,17 +3,17 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClReshapeFloat32Workload.hpp"
+#include "ClReshapeFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
namespace armnn
{
-ClReshapeFloat32Workload::ClReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info)
+ClReshapeFloatWorkload::ClReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info)
: FloatWorkload<ReshapeQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("ClReshapeFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClReshapeFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -21,9 +21,9 @@ ClReshapeFloat32Workload::ClReshapeFloat32Workload(const ReshapeQueueDescriptor&
m_Layer.configure(&input, &output);
}
-void ClReshapeFloat32Workload::Execute() const
+void ClReshapeFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeFloatWorkload_Execute");
m_Layer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.hpp
index 0eb4d08da0..18392af99b 100644
--- a/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.hpp
@@ -10,10 +10,10 @@
namespace armnn
{
-class ClReshapeFloat32Workload : public FloatWorkload<ReshapeQueueDescriptor>
+class ClReshapeFloatWorkload : public FloatWorkload<ReshapeQueueDescriptor>
{
public:
- ClReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
diff --git a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.cpp
index abef682611..e7e2f70e40 100644
--- a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.cpp
@@ -3,7 +3,7 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClResizeBilinearFloat32Workload.hpp"
+#include "ClResizeBilinearFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ClLayerSupport.hpp"
@@ -12,11 +12,11 @@
namespace armnn
{
-ClResizeBilinearFloat32Workload::ClResizeBilinearFloat32Workload(const ResizeBilinearQueueDescriptor& descriptor,
+ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info)
: FloatWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
{
- m_Data.ValidateInputsOutputs("ClResizeBilinearFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClResizeBilinearFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -26,9 +26,9 @@ ClResizeBilinearFloat32Workload::ClResizeBilinearFloat32Workload(const ResizeBil
arm_compute::SamplingPolicy::TOP_LEFT);
};
-void ClResizeBilinearFloat32Workload::Execute() const
+void ClResizeBilinearFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
m_ResizeBilinearLayer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.hpp
index 81c0566bb3..7527cc68db 100644
--- a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.hpp
@@ -10,10 +10,10 @@
namespace armnn
{
-class ClResizeBilinearFloat32Workload : public FloatWorkload<ResizeBilinearQueueDescriptor>
+class ClResizeBilinearFloatWorkload : public FloatWorkload<ResizeBilinearQueueDescriptor>
{
public:
- ClResizeBilinearFloat32Workload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
private:
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.cpp
index 08247bc593..2816d80f74 100644
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.cpp
@@ -3,28 +3,28 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClSoftmaxFloat32Workload.hpp"
+#include "ClSoftmaxFloatWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
namespace armnn
{
-ClSoftmaxFloat32Workload::ClSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
+ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
, m_SoftmaxLayer(memoryManager)
{
- m_Data.ValidateInputsOutputs("ClSoftmaxFloat32Workload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClSoftmaxFloatWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
}
-void ClSoftmaxFloat32Workload::Execute() const
+void ClSoftmaxFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
m_SoftmaxLayer.run();
}
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.hpp
index 6cad59800b..d50ba6f06b 100644
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.hpp
@@ -14,10 +14,10 @@
namespace armnn
{
-class ClSoftmaxFloat32Workload : public FloatWorkload<SoftmaxQueueDescriptor>
+class ClSoftmaxFloatWorkload : public FloatWorkload<SoftmaxQueueDescriptor>
{
public:
- ClSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
+ ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
void Execute() const override;
diff --git a/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.cpp
index 8a622c6caf..816cc6a1c4 100644
--- a/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.cpp
@@ -3,14 +3,14 @@
// See LICENSE file in the project root for full license information.
//
-#include "ClSplitterFloat32Workload.hpp"
+#include "ClSplitterFloatWorkload.hpp"
namespace armnn
{
-void ClSplitterFloat32Workload::Execute() const
+void ClSplitterFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSplitterFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClSplitterFloatWorkload_Execute");
ClBaseSplitterWorkload::Execute();
}
diff --git a/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.hpp
index affa9f840f..f795473548 100644
--- a/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.hpp
+++ b/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.hpp
@@ -10,7 +10,7 @@
namespace armnn
{
-class ClSplitterFloat32Workload : public ClBaseSplitterWorkload<DataType::Float16, DataType::Float32>
+class ClSplitterFloatWorkload : public ClBaseSplitterWorkload<DataType::Float16, DataType::Float32>
{
public:
using ClBaseSplitterWorkload<DataType::Float16, DataType::Float32>::ClBaseSplitterWorkload;