author     David Beck <david.beck@arm.com>    2018-09-19 12:03:20 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>    2018-10-10 16:16:56 +0100
commit     10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab (patch)
tree       1ac5b4f415531e2ef759439ab8e113f177bea7c5 /src/armnn/backends/ClWorkloads
parent     a3f165624b2cdfbced674af5a6e11856b1e746d9 (diff)
download   armnn-10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab.tar.gz
IVGCVSW-1897 : build infrastructure for the src/backends folder
Change-Id: I7ebafb675ccc77ad54d1deb01412a8379a5356bb
Diffstat (limited to 'src/armnn/backends/ClWorkloads')
-rw-r--r--  src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp | 56
-rw-r--r--  src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp | 29
-rw-r--r--  src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp | 44
-rw-r--r--  src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp | 29
-rw-r--r--  src/armnn/backends/ClWorkloads/ClAdditionWorkload.cpp | 66
-rw-r--r--  src/armnn/backends/ClWorkloads/ClAdditionWorkload.hpp | 31
-rw-r--r--  src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp | 64
-rw-r--r--  src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp | 30
-rw-r--r--  src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp | 28
-rw-r--r--  src/armnn/backends/ClWorkloads/ClBaseSplitterWorkload.hpp | 28
-rw-r--r--  src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp | 96
-rw-r--r--  src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp | 46
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp | 18
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.hpp | 20
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp | 18
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp | 20
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.cpp | 66
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.hpp | 30
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.cpp | 66
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.hpp | 30
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp | 48
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp | 24
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp | 81
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.hpp | 35
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp | 81
-rw-r--r--  src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp | 35
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp | 125
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp | 40
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.cpp | 39
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.hpp | 26
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp | 40
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp | 23
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp | 48
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.hpp | 32
-rw-r--r--  src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp | 31
-rw-r--r--  src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp | 30
-rw-r--r--  src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.cpp | 111
-rw-r--r--  src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.hpp | 43
-rw-r--r--  src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp | 49
-rw-r--r--  src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp | 34
-rw-r--r--  src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp | 408
-rw-r--r--  src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp | 68
-rw-r--r--  src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp | 20
-rw-r--r--  src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.hpp | 22
-rw-r--r--  src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp | 19
-rw-r--r--  src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp | 21
-rw-r--r--  src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.cpp | 60
-rw-r--r--  src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp | 34
-rw-r--r--  src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.cpp | 50
-rw-r--r--  src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.hpp | 29
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp | 56
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp | 42
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp | 47
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp | 33
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.cpp | 26
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.hpp | 22
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp | 27
-rw-r--r--  src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp | 25
-rw-r--r--  src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.cpp | 33
-rw-r--r--  src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.hpp | 28
-rw-r--r--  src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp | 31
-rw-r--r--  src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp | 29
-rw-r--r--  src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.cpp | 38
-rw-r--r--  src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.hpp | 25
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.cpp | 30
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.hpp | 17
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.cpp | 33
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.hpp | 30
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp | 43
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp | 31
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.cpp | 19
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.hpp | 20
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp | 19
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp | 21
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSubtractionWorkload.cpp | 66
-rw-r--r--  src/armnn/backends/ClWorkloads/ClSubtractionWorkload.hpp | 31
-rw-r--r--  src/armnn/backends/ClWorkloads/ClWorkloadUtils.hpp | 62
77 files changed, 0 insertions, 3405 deletions
diff --git a/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp
deleted file mode 100644
index 97078bddd8..0000000000
--- a/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClActivationFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/ArmComputeUtils.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor)
-{
- const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- const arm_compute::ActivationLayerInfo activationLayerInfo =
- ConvertActivationDescriptorToAclActivationLayerInfo(descriptor);
-
- if (input.GetDataType() == DataType::QuantisedAsymm8 &&
- activationLayerInfo.activation() == arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC)
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "CL: Logistic Activations unsupported with QAsymm8 data type."};
- }
-
- return arm_compute::CLActivationLayer::validate(&aclInput,
- &aclOutput,
- activationLayerInfo);
-}
-
-ClActivationFloatWorkload::ClActivationFloatWorkload(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<ActivationQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClActivationFloatWorkload", 1, 1);
-
- const arm_compute::ActivationLayerInfo activationLayerInfo =
- ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);
-
- arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- m_ActivationLayer.configure(&input, &output, activationLayerInfo);
-}
-
-void ClActivationFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationFloatWorkload_Execute");
- m_ActivationLayer.run();
-}
-
-} //namespace armnn
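For context, ClActivationWorkloadValidate follows the same pattern as the other validate helpers in this folder: it builds ACL TensorInfo objects and returns an arm_compute::Status. A caller in the layer-support code would typically reduce that status to a boolean plus an optional reason string, roughly as in the sketch below; the wrapper name is illustrative and not part of the ArmNN sources.

#include "ClActivationFloatWorkload.hpp" // declares ClActivationWorkloadValidate
#include <arm_compute/core/Error.h>
#include <string>

// Illustrative only: reducing the arm_compute::Status returned by the validate
// helper above to a bool + reason string, mirroring the pattern used by
// ClAdditionValidate later in this diff.
bool IsClActivationSupported(const armnn::TensorInfo& input,
                             const armnn::TensorInfo& output,
                             const armnn::ActivationDescriptor& descriptor,
                             std::string* reasonIfUnsupported)
{
    const arm_compute::Status status =
        armnn::ClActivationWorkloadValidate(input, output, descriptor);

    const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        *reasonIfUnsupported = status.error_description();
    }
    return supported;
}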
diff --git a/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp
deleted file mode 100644
index e1b6fe13d8..0000000000
--- a/src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor);
-
-// Activation layer execution.
-class ClActivationFloatWorkload : public FloatWorkload<ActivationQueueDescriptor>
-{
-public:
- ClActivationFloatWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLActivationLayer m_ActivationLayer;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp
deleted file mode 100644
index f39c856aa9..0000000000
--- a/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClActivationUint8Workload.hpp"
-#include "backends/ClLayerSupport.hpp"
-
-#include "backends/ArmComputeUtils.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClActivationUint8Workload::ClActivationUint8Workload(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : Uint8Workload<ActivationQueueDescriptor>(descriptor, info)
-{
- auto activation = ConvertActivationFunctionToAclActivationFunction(m_Data.m_Parameters.m_Function);
- arm_compute::ActivationLayerInfo layerInfo(activation,
- m_Data.m_Parameters.m_A,
- m_Data.m_Parameters.m_B);
-
- m_Data.ValidateInputsOutputs("ClActivationUint8Workload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_ActivationLayer.configure(&input, &output, layerInfo);
-}
-
-void ClActivationUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClActivationUint8Workload_Execute");
-
- m_ActivationLayer.run();
-}
-
-} //namespace armnn
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp
deleted file mode 100644
index bb2ff58853..0000000000
--- a/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-// Activation layer execution.
-class ClActivationUint8Workload : public Uint8Workload<ActivationQueueDescriptor>
-{
-public:
- ClActivationUint8Workload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLActivationLayer m_ActivationLayer;
-};
-
-} //namespace armnn
-
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClAdditionWorkload.cpp b/src/armnn/backends/ClWorkloads/ClAdditionWorkload.cpp
deleted file mode 100644
index dd439d59a9..0000000000
--- a/src/armnn/backends/ClWorkloads/ClAdditionWorkload.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClAdditionWorkload.hpp"
-
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
-
-template <armnn::DataType... T>
-ClAdditionWorkload<T...>::ClAdditionWorkload(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : TypedWorkload<AdditionQueueDescriptor, T...>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("ClAdditionWorkload", 2, 1);
-
- arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
-}
-
-template <armnn::DataType... T>
-void ClAdditionWorkload<T...>::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionWorkload_Execute");
- m_Layer.run();
-}
-
-bool ClAdditionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
-{
- const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
- const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- const arm_compute::Status aclStatus = arm_compute::CLArithmeticAddition::validate(&aclInput0Info,
- &aclInput1Info,
- &aclOutputInfo,
- g_AclConvertPolicy);
-
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return supported;
-}
-
-} //namespace armnn
-
-template class armnn::ClAdditionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>;
-template class armnn::ClAdditionWorkload<armnn::DataType::QuantisedAsymm8>;
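The explicit instantiations above exist because the ClAdditionWorkload template is defined in the .cpp file rather than the header, so the data-type combinations used elsewhere in the library must be emitted here for the linker. A minimal standalone sketch of the same idiom, with made-up names for illustration:

// Sketch of the explicit-instantiation idiom used by ClAdditionWorkload: the
// template body lives in a .cpp, and the translation unit ends by instantiating
// exactly the variants the rest of the code links against.
enum class DataType { Float16, Float32, QuantisedAsymm8 };

template <DataType... Ts>
class ExampleTypedWorkload
{
public:
    void Execute() const
    {
        // A real workload would run an ACL function here.
    }
};

// Emit the float and quantised variants for the linker.
template class ExampleTypedWorkload<DataType::Float16, DataType::Float32>;
template class ExampleTypedWorkload<DataType::QuantisedAsymm8>;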
diff --git a/src/armnn/backends/ClWorkloads/ClAdditionWorkload.hpp b/src/armnn/backends/ClWorkloads/ClAdditionWorkload.hpp
deleted file mode 100644
index b4706890d1..0000000000
--- a/src/armnn/backends/ClWorkloads/ClAdditionWorkload.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-template <armnn::DataType... dataTypes>
-class ClAdditionWorkload : public TypedWorkload<AdditionQueueDescriptor, dataTypes...>
-{
-public:
- ClAdditionWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- mutable arm_compute::CLArithmeticAddition m_Layer;
-};
-
-bool ClAdditionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp b/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp
deleted file mode 100644
index 021d17512f..0000000000
--- a/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClBaseConstantWorkload.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "Half.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-template class ClBaseConstantWorkload<DataType::Float16, DataType::Float32>;
-template class ClBaseConstantWorkload<DataType::QuantisedAsymm8>;
-
-template<armnn::DataType... dataTypes>
-void ClBaseConstantWorkload<dataTypes...>::Execute() const
-{
- // The intermediate tensor held by the corresponding layer output handler can be initialised with the given data
- // on the first inference, then reused for subsequent inferences.
- // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer may not
- // have been configured at the time.
- if (!m_RanOnce)
- {
- const ConstantQueueDescriptor& data = this->m_Data;
-
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
- arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor();
- arm_compute::DataType computeDataType = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetDataType();
-
- switch (computeDataType)
- {
- case arm_compute::DataType::F16:
- {
- CopyArmComputeClTensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
- break;
- }
- case arm_compute::DataType::F32:
- {
- CopyArmComputeClTensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
- break;
- }
- case arm_compute::DataType::QASYMM8:
- {
- CopyArmComputeClTensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
- break;
- }
- default:
- {
- BOOST_ASSERT_MSG(false, "Unknown data type");
- break;
- }
- }
-
- m_RanOnce = true;
- }
-}
-
-
-} //namespace armnn
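The comment at the top of Execute() above describes a deferred, run-once upload: the constant data cannot be copied at construction time because the downstream ACL kernels may not have been configured yet, so the copy happens on the first call to Execute() and is skipped afterwards. A stripped-down sketch of that pattern, with a placeholder standing in for CopyArmComputeClTensorData:

// Minimal sketch of the run-once initialisation in ClBaseConstantWorkload.
// Execute() is const, so the flag is mutable; UploadConstantData() stands in
// for the CopyArmComputeClTensorData calls above and is not ArmNN API.
class DeferredConstantUpload
{
public:
    void Execute() const
    {
        if (!m_RanOnce)
        {
            UploadConstantData(); // one-time copy, safe once ACL kernels are configured
            m_RanOnce = true;
        }
    }

private:
    void UploadConstantData() const { /* copy host data into the CL tensor */ }
    mutable bool m_RanOnce = false;
};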
diff --git a/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp b/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp
deleted file mode 100644
index ca1db389dc..0000000000
--- a/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-template <armnn::DataType... DataTypes>
-class ClBaseConstantWorkload : public TypedWorkload<ConstantQueueDescriptor, DataTypes...>
-{
-public:
- ClBaseConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
- : TypedWorkload<ConstantQueueDescriptor, DataTypes...>(descriptor, info)
- , m_RanOnce(false)
- {
- }
-
- void Execute() const override;
-
-private:
- mutable bool m_RanOnce;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp b/src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp
deleted file mode 100644
index 420e074217..0000000000
--- a/src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-// Base class template providing an implementation of the Merger layer common to all data types.
-template <armnn::DataType... DataTypes>
-class ClBaseMergerWorkload : public TypedWorkload<MergerQueueDescriptor, DataTypes...>
-{
-public:
- using TypedWorkload<MergerQueueDescriptor, DataTypes...>::TypedWorkload;
-
- void Execute() const override
- {
- // With subtensors, merger is a no-op.
- }
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClBaseSplitterWorkload.hpp b/src/armnn/backends/ClWorkloads/ClBaseSplitterWorkload.hpp
deleted file mode 100644
index 41f382cac8..0000000000
--- a/src/armnn/backends/ClWorkloads/ClBaseSplitterWorkload.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-// Base class template providing an implementation of the Splitter layer common to all data types.
-template <armnn::DataType... DataTypes>
-class ClBaseSplitterWorkload : public TypedWorkload<SplitterQueueDescriptor, DataTypes...>
-{
-public:
- using TypedWorkload<SplitterQueueDescriptor, DataTypes...>::TypedWorkload;
-
- void Execute() const override
- {
-        // With subtensors, splitter is a no-op.
- }
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp
deleted file mode 100644
index 021734aaa6..0000000000
--- a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClBatchNormalizationFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/ClLayerSupport.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-arm_compute::Status ClBatchNormalizationValidate(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor &desc)
-{
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
- const arm_compute::TensorInfo aclMeanInfo = BuildArmComputeTensorInfo(mean);
- const arm_compute::TensorInfo aclVarInfo = BuildArmComputeTensorInfo(var);
- const arm_compute::TensorInfo aclBetaInfo = BuildArmComputeTensorInfo(beta);
- const arm_compute::TensorInfo aclGammaInfo = BuildArmComputeTensorInfo(gamma);
-
- return arm_compute::CLBatchNormalizationLayer::validate(&aclInputInfo,
- &aclOutputInfo,
- &aclMeanInfo,
- &aclVarInfo,
- &aclBetaInfo,
- &aclGammaInfo,
- desc.m_Eps);
-}
-
-ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
- const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info)
- : FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
-{
- m_Mean = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo());
-
- m_Variance = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo());
-
- m_Gamma = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo());
-
- m_Beta = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());
-
- m_Data.ValidateInputsOutputs("ClBatchNormalizationFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input,
- &output,
- m_Mean.get(),
- m_Variance.get(),
- m_Beta.get(),
- m_Gamma.get(),
- m_Data.m_Parameters.m_Eps);
-
- InitializeArmComputeClTensorDataForFloatTypes(*m_Mean, m_Data.m_Mean);
- InitializeArmComputeClTensorDataForFloatTypes(*m_Variance, m_Data.m_Variance);
- InitializeArmComputeClTensorDataForFloatTypes(*m_Beta, m_Data.m_Beta);
- InitializeArmComputeClTensorDataForFloatTypes(*m_Gamma, m_Data.m_Gamma);
-
- // Force Compute Library to perform the necessary copying and reshaping, after which
- // delete all the input tensors that will no longer be needed
- m_Layer.prepare();
- FreeUnusedTensors();
-}
-
-void ClBatchNormalizationFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClBatchNormalizationFloatWorkload_Execute");
- m_Layer.run();
-}
-
-void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_Mean);
- FreeTensorIfUnused(m_Variance);
- FreeTensorIfUnused(m_Gamma);
- FreeTensorIfUnused(m_Beta);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp
deleted file mode 100644
index 22c71b1073..0000000000
--- a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClBatchNormalizationValidate(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& desc);
-
-class ClBatchNormalizationFloatWorkload : public FloatWorkload<BatchNormalizationQueueDescriptor>
-{
-public:
- ClBatchNormalizationFloatWorkload(const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- using FloatWorkload<BatchNormalizationQueueDescriptor>::FloatWorkload;
- void Execute() const override;
-
-private:
- mutable arm_compute::CLBatchNormalizationLayer m_Layer;
-
- std::unique_ptr<arm_compute::CLTensor> m_Mean;
- std::unique_ptr<arm_compute::CLTensor> m_Variance;
- std::unique_ptr<arm_compute::CLTensor> m_Gamma;
- std::unique_ptr<arm_compute::CLTensor> m_Beta;
-
- void FreeUnusedTensors();
-};
-
-} //namespace armnn
-
-
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp
deleted file mode 100644
index 1565047c22..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConstantFloatWorkload.hpp"
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void ClConstantFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClConstantFloatWorkload_Execute");
- ClBaseConstantWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.hpp
deleted file mode 100644
index 0cbeaad9ea..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClBaseConstantWorkload.hpp"
-
-namespace armnn
-{
-class ClConstantFloatWorkload : public ClBaseConstantWorkload<DataType::Float16, DataType::Float32>
-{
-public:
- using ClBaseConstantWorkload<DataType::Float16, DataType::Float32>::ClBaseConstantWorkload;
- void Execute() const override;
-};
-
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp
deleted file mode 100644
index a5ef0321cd..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConstantUint8Workload.hpp"
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void ClConstantUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClConstantUint8Workload_Execute");
- ClBaseConstantWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp
deleted file mode 100644
index 30556dc0d6..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClBaseConstantWorkload.hpp"
-
-namespace armnn
-{
-
-class ClConstantUint8Workload : public ClBaseConstantWorkload<DataType::QuantisedAsymm8>
-{
-public:
- using ClBaseConstantWorkload<DataType::QuantisedAsymm8>::ClBaseConstantWorkload;
- void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.cpp b/src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.cpp
deleted file mode 100644
index 534249aeac..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvertFp16ToFp32Workload.hpp"
-#include "backends/ClTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
-
-ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(
- const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) :
- Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("ClConvertFp16ToFp32Workload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input, &output, g_AclConvertPolicy, 0);
-}
-
-void ClConvertFp16ToFp32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp16ToFp32Workload_Execute");
- m_Layer.run();
-}
-
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
-{
- if (input.GetDataType() != DataType::Float16)
- {
- *reasonIfUnsupported = "Input should be Float16";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
- }
- if (output.GetDataType() != DataType::Float32)
- {
- *reasonIfUnsupported = "Output should be Float32";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
- }
-
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
- &aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
-
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return aclStatus;
-}
-
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.hpp b/src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.hpp
deleted file mode 100644
index c72d2262b3..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClConvertFp16ToFp32Workload : public Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>
-{
-public:
-
- ClConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
- virtual void Execute() const override;
-
-private:
- mutable arm_compute::CLDepthConvertLayer m_Layer;
-};
-
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.cpp b/src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.cpp
deleted file mode 100644
index 73b3cbc542..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvertFp32ToFp16Workload.hpp"
-#include "backends/ClTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
-
-ClConvertFp32ToFp16Workload::ClConvertFp32ToFp16Workload(
- const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) :
- Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("ClConvertFp32ToFp16Workload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input, &output, g_AclConvertPolicy, 0);
-}
-
-void ClConvertFp32ToFp16Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvertFp32ToFp16Workload_Execute");
- m_Layer.run();
-}
-
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
-{
- if (input.GetDataType() != DataType::Float32)
- {
- *reasonIfUnsupported = "Input should be Float32";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
- }
- if (output.GetDataType() != DataType::Float16)
- {
- *reasonIfUnsupported = "Output should be Float16";
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
- }
-
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
- &aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
-
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return aclStatus;
-}
-
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.hpp b/src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.hpp
deleted file mode 100644
index fb6af02070..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClConvertFp32ToFp16Workload : public Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>
-{
-public:
-
- ClConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info);
- virtual void Execute() const override;
-
-private:
- mutable arm_compute::CLDepthConvertLayer m_Layer;
-};
-
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp b/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
deleted file mode 100644
index 228f17d54e..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvolution2dBaseWorkload.hpp"
-#include "backends/ClLayerSupport.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/ArmComputeUtils.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases)
-{
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
- const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights);
-
- arm_compute::TensorInfo aclBiasesInfo;
- arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
-
- if (descriptor.m_BiasEnabled)
- {
- BOOST_ASSERT(biases.is_initialized());
-
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.get());
- optionalAclBiasesInfo = &aclBiasesInfo;
- }
-
- arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
-
- return arm_compute::CLConvolutionLayer::validate(&aclInputInfo,
- &aclWeightsInfo,
- optionalAclBiasesInfo,
- &aclOutputInfo,
- layerInfo);
-}
-
-}
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp b/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp
deleted file mode 100644
index a983dba79a..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dBaseWorkload.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Tensor.hpp>
-#include <armnn/Descriptors.hpp>
-
-#include <boost/optional.hpp>
-
-#include <arm_compute/core/Error.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases);
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp
deleted file mode 100644
index 029f41d5dc..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvolution2dFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/ClLayerSupport.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : FloatWorkload<Convolution2dQueueDescriptor>(descriptor, info)
- , m_ConvolutionLayer(memoryManager)
-{
-
- // todo: check tensor shapes match.
- const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
-
- m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo);
-
- arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
-
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
- }
-
- m_Data.ValidateInputsOutputs("ClConvolution2dFloat32Workload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_ConvolutionLayer.configure(&input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
- &output,
- padStrideInfo);
-
- InitializeArmComputeClTensorDataForFloatTypes(*m_KernelTensor, m_Data.m_Weight);
-
- if (m_BiasTensor)
- {
- InitializeArmComputeClTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
- }
-
- // Force Compute Library to perform the necessary copying and reshaping, after which
- // delete all the input tensors that will no longer be needed
- m_ConvolutionLayer.prepare();
- FreeUnusedTensors();
-}
-
-void ClConvolution2dFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dFloat32Workload_Execute");
-
- m_ConvolutionLayer.run();
-}
-
-void ClConvolution2dFloatWorkload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_KernelTensor);
- FreeTensorIfUnused(m_BiasTensor);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.hpp
deleted file mode 100644
index 28ba53f38a..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dFloatWorkload.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class ClConvolution2dFloatWorkload : public FloatWorkload<Convolution2dQueueDescriptor>
-{
-public:
- ClConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
-
- std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
- std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
-
- void FreeUnusedTensors();
-};
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp
deleted file mode 100644
index e6783b698a..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvolution2dUint8Workload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/ClLayerSupport.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : Uint8Workload<Convolution2dQueueDescriptor>(descriptor, info)
- , m_ConvolutionLayer(memoryManager)
-{
- // todo: check tensor shapes match
- const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
-
- m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo);
-
- arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
-
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
- }
-
- m_Data.ValidateInputsOutputs("ClConvolution2dUint8Workload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_ConvolutionLayer.configure(&input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
- &output,
- padStrideInfo);
-
- InitialiseArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
-
- if (m_BiasTensor)
- {
- InitialiseArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
- }
-
- // Force Compute Library to perform the necessary copying and reshaping, after which
- // delete all the input tensors that will no longer be needed
- m_ConvolutionLayer.prepare();
- FreeUnusedTensors();
-}
-
-void ClConvolution2dUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dUint8Workload_Execute");
-
- m_ConvolutionLayer.run();
-}
-
-void ClConvolution2dUint8Workload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_KernelTensor);
- FreeTensorIfUnused(m_BiasTensor);
-}
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp
deleted file mode 100644
index f1f008b1b9..0000000000
--- a/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class ClConvolution2dUint8Workload : public Uint8Workload<Convolution2dQueueDescriptor>
-{
-public:
- ClConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
-
- std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
- std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
-
- void FreeUnusedTensors();
-};
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp
deleted file mode 100644
index 0e89a68118..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClDepthwiseConvolutionBaseWorkload.hpp"
-
-#include "TypeUtils.hpp"
-
-#include "backends/ArmComputeUtils.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-namespace armnn
-{
-
-using namespace armcomputetensorutils;
-
-arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases)
-{
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
- const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights);
-
- arm_compute::TensorInfo aclBiasesInfo;
- arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
-
- if (descriptor.m_BiasEnabled)
- {
- BOOST_ASSERT(biases.is_initialized());
-
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.get());
- optionalAclBiasesInfo = &aclBiasesInfo;
- }
-
- const arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
- const unsigned int aclDepthMultiplier = weights.GetShape()[0];
-
- return arm_compute::CLDepthwiseConvolutionLayer::validate(&aclInputInfo,
- &aclWeightsInfo,
- optionalAclBiasesInfo,
- &aclOutputInfo,
- aclPadStrideInfo,
- aclDepthMultiplier);
-}
-
-template<armnn::DataType... dataTypes>
-ClDepthwiseConvolutionBaseWorkload<dataTypes...>::ClDepthwiseConvolutionBaseWorkload(
- const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : TypedWorkload<DepthwiseConvolution2dQueueDescriptor, dataTypes...>(descriptor, info)
-{
- auto& weightInfo = m_Data.m_Weight->GetTensorInfo();
-
- m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo);
-
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
- }
-
- arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
-
- std::string name = std::string("ClDepthwiseConvolution") +
- GetDataTypeName(m_Data.m_Weight->GetTensorInfo().GetDataType()) + "Workload";
- m_Data.ValidateInputsOutputs(name, 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- const unsigned int depthMultiplier = weightInfo.GetShape()[0];
-
- //Check for optimisation opportunities.
- bool use3x3Optimisation = (weightInfo.GetShape()[3] == 3) && (weightInfo.GetShape()[2] == 3);
- if (use3x3Optimisation)
- {
- m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer3x3>();
- static_cast<arm_compute::CLDepthwiseConvolutionLayer3x3*>(m_DepthwiseConvolutionLayer.get())->configure(
- &input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
- &output,
- padStrideInfo,
- depthMultiplier);
- }
- else
- {
- m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
- static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_DepthwiseConvolutionLayer.get())->configure(
- &input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
- &output,
- padStrideInfo,
- depthMultiplier);
- }
-
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
-}
-
-template<armnn::DataType... dataTypes>
-void ClDepthwiseConvolutionBaseWorkload<dataTypes...>::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_KernelTensor);
- FreeTensorIfUnused(m_BiasTensor);
-}
-
-// Generate known implementations for linker
-template class ClDepthwiseConvolutionBaseWorkload<DataType::Float16, DataType::Float32>;
-template class ClDepthwiseConvolutionBaseWorkload<DataType::QuantisedAsymm8>;
-
-} // namespace armnn
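The base workload above derives the depth multiplier from the first dimension of the weight tensor and enables the CLDepthwiseConvolutionLayer3x3 fast path when the kernel is 3x3. A small worked example of that shape arithmetic, assuming the [multiplier, channels, height, width] weight layout used at this revision:

// Worked example of the depthwise shape logic above (weight layout assumed to
// be [M, C, H, W] at this revision of ArmNN; adjust if the layout differs).
#include <cstdio>

int main()
{
    const unsigned int weightShape[4] = { 2 /*M*/, 16 /*C*/, 3 /*H*/, 3 /*W*/ };

    const unsigned int depthMultiplier = weightShape[0]; // shape[0], as in the code above
    const unsigned int inputChannels   = weightShape[1];
    const bool use3x3Optimisation      = (weightShape[3] == 3) && (weightShape[2] == 3);

    // Each input channel produces depthMultiplier output channels.
    std::printf("output channels = %u, 3x3 fast path = %s\n",
                inputChannels * depthMultiplier,
                use3x3Optimisation ? "yes" : "no");
    return 0;
}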
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp
deleted file mode 100644
index 49a8b5d357..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionBaseWorkload.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include <boost/optional.hpp>
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases);
-
-template<armnn::DataType... dataTypes>
-class ClDepthwiseConvolutionBaseWorkload : public TypedWorkload<DepthwiseConvolution2dQueueDescriptor, dataTypes...>
-{
-public:
- using TypedWorkload<DepthwiseConvolution2dQueueDescriptor, dataTypes...>::m_Data;
-
- ClDepthwiseConvolutionBaseWorkload(const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info);
-
-protected:
- std::unique_ptr<arm_compute::IFunction> m_DepthwiseConvolutionLayer;
-
- std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
- std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
-
- void FreeUnusedTensors();
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.cpp
deleted file mode 100644
index 635ae1f327..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClDepthwiseConvolutionFloatWorkload.hpp"
-
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClDepthwiseConvolutionFloatWorkload::ClDepthwiseConvolutionFloatWorkload(
- const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : ClDepthwiseConvolutionBaseWorkload(descriptor, info)
-{
- InitializeArmComputeClTensorDataForFloatTypes(*m_KernelTensor, m_Data.m_Weight);
-
- if (m_BiasTensor)
- {
- InitializeArmComputeClTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
- }
-
- m_DepthwiseConvolutionLayer->prepare();
- FreeUnusedTensors();
-}
-
-void ClDepthwiseConvolutionFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionFloatWorkload_Execute");
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
-
- m_DepthwiseConvolutionLayer->run();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.hpp
deleted file mode 100644
index 4f9d5f332e..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloatWorkload.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClDepthwiseConvolutionBaseWorkload.hpp"
-
-namespace armnn
-{
-
-class ClDepthwiseConvolutionFloatWorkload : public ClDepthwiseConvolutionBaseWorkload<DataType::Float16,
- DataType::Float32>
-{
-public:
- ClDepthwiseConvolutionFloatWorkload(const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info);
- void Execute() const override;
-};
-
-} //namespace armnn
-
-
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp
deleted file mode 100644
index af5836e908..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClDepthwiseConvolutionUint8Workload.hpp"
-
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClDepthwiseConvolutionUint8Workload::ClDepthwiseConvolutionUint8Workload(
- const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : ClDepthwiseConvolutionBaseWorkload(descriptor, info)
-{
- InitialiseArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<uint8_t>());
-
- if (m_BiasTensor)
- {
- InitialiseArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias->template GetConstTensor<int32_t>());
- }
-
- m_DepthwiseConvolutionLayer->prepare();
- FreeUnusedTensors();
-}
-
-void ClDepthwiseConvolutionUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionUint8Workload_Execute");
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
-
- m_DepthwiseConvolutionLayer->run();
-}
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp
deleted file mode 100644
index b9f676de94..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClDepthwiseConvolutionBaseWorkload.hpp"
-
-namespace armnn
-{
-
-class ClDepthwiseConvolutionUint8Workload : public ClDepthwiseConvolutionBaseWorkload<DataType::QuantisedAsymm8>
-{
-public:
- ClDepthwiseConvolutionUint8Workload(const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info);
- void Execute() const override;
-};
-
-} //namespace armnn
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp
deleted file mode 100644
index 2371789035..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClDivisionFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output)
-{
- const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
- const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
- const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- return arm_compute::CLArithmeticDivision::validate(&aclInput1, &aclInput2, &aclOutput);
-}
-
-
-ClDivisionFloatWorkload::ClDivisionFloatWorkload(const DivisionQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<DivisionQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClDivisionFloatWorkload", 2, 1);
-
- arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- // Construct
- m_ArithmeticDivision.configure(&input0, &input1, &output);
-}
-
-void ClDivisionFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClDivisionFloatWorkload_Execute");
-
- // Executes the layer.
- m_ArithmeticDivision.run();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.hpp
deleted file mode 100644
index d34e11dab8..0000000000
--- a/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output);
-
-class ClDivisionFloatWorkload : public FloatWorkload<DivisionQueueDescriptor>
-{
-public:
- ClDivisionFloatWorkload(const DivisionQueueDescriptor& descriptor, const
- WorkloadInfo& info);
-
- using FloatWorkload<DivisionQueueDescriptor>::FloatWorkload;
- void Execute() const override;
-
-private:
- mutable arm_compute::CLArithmeticDivision m_ArithmeticDivision;
-};
-
-} //namespace armnn
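
Note: ClDivisionWorkloadValidate is the query counterpart of the constructor above; the CL backend can use it to reject unsupported tensor combinations before any workload is built. A minimal, hedged usage sketch (the wrapper name and error reporting are illustrative, not part of the deleted sources):

    #include <string>

    bool IsDivisionSupportedOnCl(const armnn::TensorInfo& input0,
                                 const armnn::TensorInfo& input1,
                                 const armnn::TensorInfo& output,
                                 std::string* reasonIfUnsupported)
    {
        const arm_compute::Status status = armnn::ClDivisionWorkloadValidate(input0, input1, output);
        const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
        if (!supported && reasonIfUnsupported != nullptr)
        {
            *reasonIfUnsupported = status.error_description(); // message produced by CLArithmeticDivision::validate
        }
        return supported;
    }
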
diff --git a/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp
deleted file mode 100644
index d090a7da81..0000000000
--- a/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClFloorFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info)
- : FloatWorkload<FloorQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClFloorFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input, &output);
-}
-
-void ClFloorFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClFloorFloatWorkload_Execute");
- m_Layer.run();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp
deleted file mode 100644
index f269bcf30c..0000000000
--- a/src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClFloorFloatWorkload : public FloatWorkload<FloorQueueDescriptor>
-{
-public:
- ClFloorFloatWorkload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- mutable arm_compute::CLFloor m_Layer;
-};
-
-} //namespace armnn
-
-
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.cpp b/src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.cpp
deleted file mode 100644
index 8d2fd0e909..0000000000
--- a/src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClFullyConnectedWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/ArmComputeUtils.hpp"
-#include "backends/ClLayerSupport.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor)
-{
- const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
- const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
-
- arm_compute::TensorInfo aclBiases;
- arm_compute::TensorInfo *optionalAclBiases = nullptr;
- if (descriptor.m_BiasEnabled)
- {
- aclBiases = BuildArmComputeTensorInfo(biases);
- optionalAclBiases = &aclBiases;
- }
-
- const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
- ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor);
-
- return arm_compute::CLFullyConnectedLayer::validate(&aclInput,
- &aclWeights,
- optionalAclBiases,
- &aclOutput,
- fullyConnectedLayerInfo);
-}
-
-ClFullyConnectedWorkload::ClFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
- const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
- , m_FullyConnectedLayer(memoryManager)
-{
- m_WeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
-
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- m_BiasesTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
- }
-
- m_Data.ValidateInputsOutputs("ClFullyConnectedWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- // Construct
- arm_compute::FullyConnectedLayerInfo fc_info;
- fc_info.transpose_weights = m_Data.m_Parameters.m_TransposeWeightMatrix;
- m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
-
- // Allocate
- if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
- {
- InitialiseArmComputeClTensorData(*m_WeightsTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
- }
- else
- {
- InitializeArmComputeClTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);
- }
-
- if (m_BiasesTensor)
- {
- if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
- {
- InitialiseArmComputeClTensorData(*m_BiasesTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
- }
- else
- {
- InitializeArmComputeClTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
- }
- }
-
- // Force Compute Library to perform the necessary copying and reshaping, after which
- // delete all the input tensors that will no longer be needed
- m_FullyConnectedLayer.prepare();
- FreeUnusedTensors();
-}
-
-void ClFullyConnectedWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
- m_FullyConnectedLayer.run();
-}
-
-void ClFullyConnectedWorkload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_WeightsTensor);
- FreeTensorIfUnused(m_BiasesTensor);
-}
-
-} //namespace armnn
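
Note: the prepare()/FreeUnusedTensors() sequence above exists so the staging weight and bias tensors can be released once CLFullyConnectedLayer has taken its own reshaped copies. A hedged sketch of the FreeTensorIfUnused helper it relies on (the real one is defined in ClWorkloadUtils.hpp; exact details may differ):

    #include <memory>

    template <typename Tensor>
    void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
    {
        // After prepare(), Compute Library marks constant inputs it no longer
        // needs as unused, so the staging CLTensor can be destroyed safely.
        if (tensor != nullptr && !tensor->is_used())
        {
            tensor.reset(nullptr);
        }
    }
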
diff --git a/src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.hpp b/src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.hpp
deleted file mode 100644
index a61610992e..0000000000
--- a/src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor);
-
-class ClFullyConnectedWorkload : public armnn::BaseWorkload<armnn::FullyConnectedQueueDescriptor>
-{
-public:
- ClFullyConnectedWorkload(const armnn::FullyConnectedQueueDescriptor& descriptor,
- const armnn::WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
-
- using armnn::BaseWorkload<armnn::FullyConnectedQueueDescriptor>::m_Data;
- void Execute() const override;
-
-private:
- mutable arm_compute::CLFullyConnectedLayer m_FullyConnectedLayer;
-
- std::unique_ptr<arm_compute::CLTensor> m_WeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_BiasesTensor;
-
- void FreeUnusedTensors();
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp
deleted file mode 100644
index 4ccaae3430..0000000000
--- a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClL2NormalizationFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeUtils.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output)
-{
- const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
-
- arm_compute::NormalizationLayerInfo normalizationInfo =
- CreateAclNormalizationLayerInfoForL2Normalization(input);
-
- return arm_compute::CLNormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
-}
-
-ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<L2NormalizationQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClL2NormalizationFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input, &output, CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0]));
-}
-
-void ClL2NormalizationFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClL2NormalizationFloatWorkload_Execute");
- m_Layer.run();
-}
-
-} //namespace armnn
-
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp
deleted file mode 100644
index f3f7de110a..0000000000
--- a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output);
-
-class ClL2NormalizationFloatWorkload : public FloatWorkload<L2NormalizationQueueDescriptor>
-{
-public:
- ClL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- // Purposely not a CLL2Normalize function. See constructor.
- mutable arm_compute::CLNormalizationLayer m_Layer;
-};
-
-} //namespace armnn
-
-
-
-
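
Note: as the "Purposely not a CLL2Normalize function" comment above indicates, L2 normalisation was expressed at this point through the generic CLNormalizationLayer, with its parameters derived from the input shape by CreateAclNormalizationLayerInfoForL2Normalization; that is why the validate helper takes no descriptor. A minimal, hedged usage sketch (the wrapper name is illustrative, not part of the deleted sources):

    bool IsL2NormalizationSupportedOnCl(const armnn::TensorInfo& inputInfo,
                                        const armnn::TensorInfo& outputInfo)
    {
        const arm_compute::Status status = armnn::ClL2NormalizationWorkloadValidate(inputInfo, outputInfo);
        return status.error_code() == arm_compute::ErrorCode::OK;
    }
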
diff --git a/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp
deleted file mode 100644
index 09a34c2d02..0000000000
--- a/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp
+++ /dev/null
@@ -1,408 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClLstmFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/ClLayerSupport.hpp"
-
-#include <arm_compute/runtime/CL/functions/CLLSTMLayer.h>
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
- : FloatWorkload<LstmQueueDescriptor>(descriptor, info)
-{
- arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;
-
- // Basic parameters
- m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());
-
- m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());
-
- m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());
-
- m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
-
- m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());
-
- m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
-
- m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());
-
- m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());
-
- m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
-
- // for future reference: check the AndroidNN API for the logic here
- if (!m_Data.m_Parameters.m_CifgEnabled)
- {
- m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());
-
- m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());
-
- m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- if (m_Data.m_CellToInputWeights != nullptr)
- {
- BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
- }
-
- m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
-
- lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
- m_RecurrentToInputWeightsTensor.get(),
- m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
- m_InputGateBiasTensor.get());
- }
-
- if (m_Data.m_Parameters.m_ProjectionEnabled)
- {
- m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());
-
- m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
- if (m_Data.m_ProjectionBias != nullptr)
- {
- BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
- }
-
- lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
- m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
- }
-
- if (m_Data.m_Parameters.m_PeepholeEnabled)
- {
- m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());
-
- m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());
-
- lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
- }
-
- const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- const arm_compute::ICLTensor& output_state_in = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- const arm_compute::ICLTensor& cell_state_in = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
-
- arm_compute::ICLTensor& output_state_out = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
- arm_compute::ICLTensor& cell_state_out = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();
-
- // Get the batch_size and the num_units from the cellStateIn dimensions
- const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
- const unsigned int batch_size = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
- const unsigned int num_units = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
-
- m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
- if (m_Data.m_Parameters.m_CifgEnabled)
- {
- // 2D tensor with dimensions [num_units * 4, batch_size] with CIFG
- armnn::TensorInfo scratchBuffer1({ batch_size, num_units * 4 }, DataType::Float32);
- BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer1);
- }
- else
- {
- // scratch_buffer [num_units * 3, batch_size] without CIFG
- armnn::TensorInfo scratchBuffer2({ batch_size, num_units * 3 }, DataType::Float32);
- BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer2);
- }
-
- float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
- float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;
-
- // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
- arm_compute::ActivationLayerInfo activationLayerInfo;
- if (m_Data.m_Parameters.m_ActivationFunc == 0)
- {
- // no activation, do nothing
- }
- else if (m_Data.m_Parameters.m_ActivationFunc == 1)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
- }
- else if (m_Data.m_Parameters.m_ActivationFunc == 3)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
- }
- else if (m_Data.m_Parameters.m_ActivationFunc == 4)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
- }
- else if (m_Data.m_Parameters.m_ActivationFunc == 6)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
- }
- else
- {
- throw armnn::Exception("Wrong Type of Activation Function!");
- }
-
-
- m_LstmLayer.configure(&input, m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
- m_InputToOutputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),
- m_RecurrentToCellWeightsTensor.get(), m_RecurrentToOutputWeightsTensor.get(),
- m_ForgetGateBiasTensor.get(), m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(),
- &output_state_in, &cell_state_in, m_ScratchBuffer.get(), &output_state_out,
- &cell_state_out, &output, lstm_param, activationLayerInfo,
- cell_threshold, projection_threshold);
-
- armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
-
- InitialiseArmComputeClTensorData(*m_InputToForgetWeightsTensor,
- m_Data.m_InputToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_InputToCellWeightsTensor,
- m_Data.m_InputToCellWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_InputToOutputWeightsTensor,
- m_Data.m_InputToOutputWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_RecurrentToForgetWeightsTensor,
- m_Data.m_RecurrentToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_RecurrentToCellWeightsTensor,
- m_Data.m_RecurrentToCellWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_RecurrentToOutputWeightsTensor,
- m_Data.m_RecurrentToOutputWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_ForgetGateBiasTensor,
- m_Data.m_ForgetGateBias->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_CellBiasTensor,
- m_Data.m_CellBias->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_OutputGateBiasTensor,
- m_Data.m_OutputGateBias->GetConstTensor<float>());
-
- if (!m_Data.m_Parameters.m_CifgEnabled)
- {
- InitialiseArmComputeClTensorData(*m_InputToInputWeightsTensor,
- m_Data.m_InputToInputWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_RecurrentToInputWeightsTensor,
- m_Data.m_RecurrentToInputWeights->GetConstTensor<float>());
- if (m_Data.m_CellToInputWeights != nullptr)
- {
- InitialiseArmComputeClTensorData(*m_CellToInputWeightsTensor,
- m_Data.m_CellToInputWeights->GetConstTensor<float>());
- }
- InitialiseArmComputeClTensorData(*m_InputGateBiasTensor,
- m_Data.m_InputGateBias->GetConstTensor<float>());
- }
-
- if (m_Data.m_Parameters.m_ProjectionEnabled)
- {
- InitialiseArmComputeClTensorData(*m_ProjectionWeightsTensor,
- m_Data.m_ProjectionWeights->GetConstTensor<float>());
- if (m_Data.m_ProjectionBias != nullptr)
- {
- InitialiseArmComputeClTensorData(*m_ProjectionBiasTensor,
- m_Data.m_ProjectionBias->GetConstTensor<float>());
- }
- }
-
- if (m_Data.m_Parameters.m_PeepholeEnabled)
- {
- InitialiseArmComputeClTensorData(*m_CellToForgetWeightsTensor,
- m_Data.m_CellToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeClTensorData(*m_CellToOutputWeightsTensor,
- m_Data.m_CellToOutputWeights->GetConstTensor<float>());
- }
-
- // Force Compute Library to perform the necessary copying and reshaping, after which
- // delete all the input tensors that will no longer be needed
- m_LstmLayer.prepare();
- FreeUnusedTensors();
-}
-
-void ClLstmFloatWorkload::Execute() const
-{
- m_LstmLayer.run();
-}
-
-arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights)
-{
- arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;
-
- // The inputs and the outputs
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputStateInInfo = BuildArmComputeTensorInfo(outputStateIn);
- const arm_compute::TensorInfo aclCellStateInInfo = BuildArmComputeTensorInfo(cellStateIn);
- const arm_compute::TensorInfo aclScratchBufferInfo = BuildArmComputeTensorInfo(scratchBuffer);
- const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
- const arm_compute::TensorInfo aclCellStateOutInfo = BuildArmComputeTensorInfo(cellStateOut);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- // Basic parameters
- const arm_compute::TensorInfo aclInputToForgetWeightsInfo = BuildArmComputeTensorInfo(inputToForgetWeights);
- const arm_compute::TensorInfo aclInputToCellWeightsInfo = BuildArmComputeTensorInfo(inputToCellWeights);
- const arm_compute::TensorInfo aclInputToOutputWeightsInfo = BuildArmComputeTensorInfo(inputToOutputWeights);
- const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
- = BuildArmComputeTensorInfo(recurrentToForgetWeights);
- const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
- = BuildArmComputeTensorInfo(recurrentToCellWeights);
- const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
- = BuildArmComputeTensorInfo(recurrentToOutputWeights);
- const arm_compute::TensorInfo aclForgetGateBiasInfo = BuildArmComputeTensorInfo(forgetGateBias);
- const arm_compute::TensorInfo aclCellBiasInfo = BuildArmComputeTensorInfo(cellBias);
- const arm_compute::TensorInfo aclOutputGateBiasInfo = BuildArmComputeTensorInfo(outputGateBias);
-
- arm_compute::TensorInfo aclInputToInputWeightsInfo;
- arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
- arm_compute::TensorInfo aclCellToInputWeightsInfo;
- arm_compute::TensorInfo aclInputGateBiasInfo;
- arm_compute::TensorInfo aclProjectionWeightsInfo;
- arm_compute::TensorInfo aclProjectionBiasInfo;
- arm_compute::TensorInfo aclCellToForgetWeightsInfo;
- arm_compute::TensorInfo aclCellToOutputWeightsInfo;
-
- if (!descriptor.m_CifgEnabled)
- {
- armnn::TensorInfo inputToInputWInfo = *inputToInputWeights;
- aclInputToInputWeightsInfo = BuildArmComputeTensorInfo(inputToInputWInfo);
- armnn::TensorInfo recurrentToInputWInfo = *recurrentToInputWeights;
- aclRecurrentToInputWeightsInfo = BuildArmComputeTensorInfo(recurrentToInputWInfo);
-
- if (cellToInputWeights != nullptr)
- {
- armnn::TensorInfo cellToInputWInfo = *cellToInputWeights;
- aclCellToInputWeightsInfo = BuildArmComputeTensorInfo(cellToInputWInfo);
- }
- armnn::TensorInfo inputGateBiasInfo = *inputGateBias;
- aclInputGateBiasInfo = BuildArmComputeTensorInfo(inputGateBiasInfo);
- lstm_params_info.set_cifg_params(&aclInputToInputWeightsInfo, &aclRecurrentToInputWeightsInfo,
- cellToInputWeights != nullptr ? &aclCellToInputWeightsInfo: nullptr,
- &aclInputGateBiasInfo);
- }
-
- if (descriptor.m_ProjectionEnabled)
- {
- const armnn::TensorInfo& projectionWInfo = *projectionWeights;
- aclProjectionWeightsInfo = BuildArmComputeTensorInfo(projectionWInfo);
-
- if (projectionBias != nullptr)
- {
- const armnn::TensorInfo& projectionBiasInfo = *projectionBias;
- aclProjectionBiasInfo = BuildArmComputeTensorInfo(projectionBiasInfo);
- }
- lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
- projectionBias != nullptr ? &aclProjectionBiasInfo: nullptr);
- }
-
- if (descriptor.m_PeepholeEnabled)
- {
- const armnn::TensorInfo& cellToForgetWInfo = *cellToForgetWeights;
- aclCellToForgetWeightsInfo = BuildArmComputeTensorInfo(cellToForgetWInfo);
- const armnn::TensorInfo& cellToOutputWInfo = *cellToOutputWeights;
- aclCellToOutputWeightsInfo = BuildArmComputeTensorInfo(cellToOutputWInfo);
- lstm_params_info.set_peephole_params(&aclCellToForgetWeightsInfo, &aclCellToOutputWeightsInfo);
- }
-
- float cell_threshold = descriptor.m_ClippingThresCell;
- float projection_threshold = descriptor.m_ClippingThresProj;
-
- // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
- arm_compute::ActivationLayerInfo activationLayerInfo;
- if (descriptor.m_ActivationFunc == 0)
- {
- // no activation, do nothing
- }
- else if (descriptor.m_ActivationFunc == 1)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
- }
- else if (descriptor.m_ActivationFunc == 3)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
- }
- else if (descriptor.m_ActivationFunc == 4)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
- }
- else if (descriptor.m_ActivationFunc == 6)
- {
- activationLayerInfo = arm_compute::ActivationLayerInfo(
- arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
- }
- else
- {
- throw armnn::Exception("Wrong Type of Activation Function!");
- }
-
- return arm_compute::CLLSTMLayer::validate(&aclInputInfo, &aclInputToForgetWeightsInfo,
- &aclInputToCellWeightsInfo,
- &aclInputToOutputWeightsInfo,
- &aclRecurrentToForgetWeightsInfo,
- &aclRecurrentToCellWeightsInfo,
- &aclRecurrentToOutputWeightsInfo,
- &aclForgetGateBiasInfo,
- &aclCellBiasInfo,
- &aclOutputGateBiasInfo,
- &aclOutputStateInInfo, &aclCellStateInInfo,
- &aclScratchBufferInfo, &aclOutputStateOutInfo,
- &aclCellStateOutInfo, &aclOutputInfo,
- lstm_params_info, activationLayerInfo,
- cell_threshold, projection_threshold);
-}
-
-void ClLstmFloatWorkload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_InputToInputWeightsTensor);
- FreeTensorIfUnused(m_InputToForgetWeightsTensor);
- FreeTensorIfUnused(m_InputToCellWeightsTensor);
- FreeTensorIfUnused(m_InputToOutputWeightsTensor);
- FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
- FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
- FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
- FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
- FreeTensorIfUnused(m_CellToInputWeightsTensor);
- FreeTensorIfUnused(m_CellToForgetWeightsTensor);
- FreeTensorIfUnused(m_CellToOutputWeightsTensor);
- FreeTensorIfUnused(m_InputGateBiasTensor);
- FreeTensorIfUnused(m_ForgetGateBiasTensor);
- FreeTensorIfUnused(m_CellBiasTensor);
- FreeTensorIfUnused(m_OutputGateBiasTensor);
- FreeTensorIfUnused(m_ProjectionWeightsTensor);
- FreeTensorIfUnused(m_ProjectionBiasTensor);
- FreeTensorIfUnused(m_ScratchBuffer);
-}
-
-} //namespace armnn
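
Note: the five-way mapping from m_ActivationFunc to arm_compute::ActivationLayerInfo appears twice above, once in the constructor and once in ClLstmFloatWorkloadValidate; the constructor comment points to the Android NN API for the meaning of the integer codes. A hedged sketch of how that duplication could be factored out (the helper name is hypothetical; the mapping itself is copied from the code above):

    arm_compute::ActivationLayerInfo ConvertLstmActivationFuncToAclInfo(uint32_t activationFunc)
    {
        using AclActivation = arm_compute::ActivationLayerInfo::ActivationFunction;
        switch (activationFunc)
        {
            case 0: return arm_compute::ActivationLayerInfo();                      // no activation
            case 1: return arm_compute::ActivationLayerInfo(AclActivation::RELU);
            case 3: return arm_compute::ActivationLayerInfo(AclActivation::BOUNDED_RELU, 6.0f);
            case 4: return arm_compute::ActivationLayerInfo(AclActivation::TANH, 1.0f, 1.0f);
            case 6: return arm_compute::ActivationLayerInfo(AclActivation::LOGISTIC);
            default: throw armnn::Exception("Wrong Type of Activation Function!");
        }
    }
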
diff --git a/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp
deleted file mode 100644
index 61d8fc3e6c..0000000000
--- a/src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.hpp
+++ /dev/null
@@ -1,68 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
-{
-public:
- ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLLSTMLayer m_LstmLayer;
-
- std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputToCellWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputToOutputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToInputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToForgetWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToCellWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_RecurrentToOutputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellToInputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellToForgetWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellToOutputWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_InputGateBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_ForgetGateBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_CellBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_OutputGateBiasTensor;
- std::unique_ptr<arm_compute::CLTensor> m_ProjectionWeightsTensor;
- std::unique_ptr<arm_compute::CLTensor> m_ProjectionBiasTensor;
-
- std::unique_ptr<arm_compute::CLTensor> m_ScratchBuffer;
-
- void FreeUnusedTensors();
-};
-
-arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor &descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights);
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp
deleted file mode 100644
index 151f1e0ee7..0000000000
--- a/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClMergerFloatWorkload.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void ClMergerFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClMergerFloatWorkload_Execute");
- ClBaseMergerWorkload::Execute();
-}
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.hpp
deleted file mode 100644
index 9782f7a8f3..0000000000
--- a/src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClBaseMergerWorkload.hpp"
-
-namespace armnn
-{
-
-class ClMergerFloatWorkload : public ClBaseMergerWorkload<DataType::Float16, DataType::Float32>
-{
-public:
- using ClBaseMergerWorkload<DataType::Float16, DataType::Float32>::ClBaseMergerWorkload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp
deleted file mode 100644
index 9d1060d857..0000000000
--- a/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClMergerUint8Workload.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void ClMergerUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClMergerUint8Workload_Execute");
- ClBaseMergerWorkload<DataType::QuantisedAsymm8>::Execute();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp
deleted file mode 100644
index cbfc19a0f2..0000000000
--- a/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClBaseMergerWorkload.hpp"
-
-namespace armnn
-{
-
-class ClMergerUint8Workload : public ClBaseMergerWorkload<armnn::DataType::QuantisedAsymm8>
-{
-public:
- using ClBaseMergerWorkload<armnn::DataType::QuantisedAsymm8>::ClBaseMergerWorkload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.cpp
deleted file mode 100644
index c3330a98e8..0000000000
--- a/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClMultiplicationFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output)
-{
- const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
- const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
- const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
- // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
- // ignored for F32 tensors.
- return arm_compute::CLPixelWiseMultiplication::validate(&aclInput1,
- &aclInput2,
- &aclOutput,
- 1.0f,
- arm_compute::ConvertPolicy::SATURATE,
- arm_compute::RoundingPolicy::TO_ZERO);
-}
-
-
-ClMultiplicationFloatWorkload::ClMultiplicationFloatWorkload(const MultiplicationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<MultiplicationQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClMultiplicationFloatWorkload", 2, 1);
-
- arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- // Construct
- m_PixelWiseMultiplication.configure(&input0,
- &input1,
- &output,
- 1.0f,
- arm_compute::ConvertPolicy::SATURATE,
- arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
-}
-
-void ClMultiplicationFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClMultiplicationFloatWorkload_Execute");
-
- // Executes the layer.
- m_PixelWiseMultiplication.run();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp
deleted file mode 100644
index c2d6b7697a..0000000000
--- a/src/armnn/backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output);
-
-class ClMultiplicationFloatWorkload : public FloatWorkload<MultiplicationQueueDescriptor>
-{
-public:
- ClMultiplicationFloatWorkload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- using FloatWorkload<MultiplicationQueueDescriptor>::FloatWorkload;
- void Execute() const override;
-
-private:
- mutable arm_compute::CLPixelWiseMultiplication m_PixelWiseMultiplication;
-};
-
-} //namespace armnn
-
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.cpp
deleted file mode 100644
index d2625354ef..0000000000
--- a/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClNormalizationFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ClLayerSupport.hpp"
-#include "backends/ArmComputeUtils.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo& input, const TensorInfo& output,
- const NormalizationDescriptor& descriptor)
-{
- const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- arm_compute::NormalizationLayerInfo layerInfo =
- armcomputetensorutils::BuildArmComputeNormalizationLayerInfo(descriptor);
-
- return arm_compute::CLNormalizationLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo);
-}
-
-ClNormalizationFloatWorkload::ClNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<NormalizationQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClNormalizationFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- arm_compute::NormalizationLayerInfo normalizationInfo =
- armcomputetensorutils::BuildArmComputeNormalizationLayerInfo(m_Data.m_Parameters);
-
- m_NormalizationLayer.configure(&input, &output, normalizationInfo);
-};
-
-void ClNormalizationFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClNormalizationFloatWorkload_Execute");
- m_NormalizationLayer.run();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.hpp
deleted file mode 100644
index f02d0adb70..0000000000
--- a/src/armnn/backends/ClWorkloads/ClNormalizationFloatWorkload.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor);
-
-class ClNormalizationFloatWorkload : public FloatWorkload<NormalizationQueueDescriptor>
-{
-public:
- ClNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLNormalizationLayer m_NormalizationLayer;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp b/src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp
deleted file mode 100644
index 29d98bf0eb..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClPermuteWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-#include <arm_compute/core/Error.h>
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-arm_compute::Status ClPermuteWorkloadValidate(const PermuteDescriptor& descriptor)
-{
- const armnn::PermutationVector& perm = descriptor.m_DimMappings;
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!perm.IsEqual({ 0U, 3U, 1U, 2U })
- && !perm.IsEqual({ 0U, 2U, 3U, 1U })
- && !perm.IsEqual({ 3U, 2U, 0U, 1U }),
- "Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported");
-
- return arm_compute::Status{};
-}
-
-template <armnn::DataType... DataTypes>
-ClPermuteWorkload<DataTypes...>::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : TypedWorkload<PermuteQueueDescriptor, DataTypes...>(descriptor, info)
-{
- using armcomputetensorutils::BuildArmComputePermutationVector;
-
- m_Data.ValidateInputsOutputs(GetName(), 1, 1);
-
- const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
-
- // Run the layer.
- m_PermuteFunction.configure(&input, &output, BuildArmComputePermutationVector(mappings));
-}
-
-template <armnn::DataType... DataTypes>
-void ClPermuteWorkload<DataTypes...>::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL( GetName() + "_Execute");
- m_PermuteFunction.run();
-}
-
-template class ClPermuteWorkload<DataType::Float16, DataType::Float32>;
-template class ClPermuteWorkload<DataType::QuantisedAsymm8>;
-
-} // namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp b/src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp
deleted file mode 100644
index a1f3161921..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-#include <armnn/TypesUtils.hpp>
-#include <arm_compute/runtime/CL/functions/CLPermute.h>
-
-#include <string>
-
-namespace armnn
-{
-
-arm_compute::Status ClPermuteWorkloadValidate(const PermuteDescriptor& descriptor);
-
-template<armnn::DataType... DataTypes>
-class ClPermuteWorkload : public TypedWorkload<PermuteQueueDescriptor, DataTypes...>
-{
-public:
- static const std::string& GetName()
- {
- static const std::string name = std::string("ClPermuteWorkload");
- return name;
- }
-
- ClPermuteWorkload(const PermuteQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- using TypedWorkload<PermuteQueueDescriptor, DataTypes...>::m_Data;
- mutable arm_compute::CLPermute m_PermuteFunction;
-};
-
-using ClPermuteFloatWorkload = ClPermuteWorkload<DataType::Float16, DataType::Float32>;
-using ClPermuteUint8Workload = ClPermuteWorkload<DataType::QuantisedAsymm8>;
-
-} // namespace armnn
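
Note: ClPermuteWorkload is a class template, explicitly instantiated in the .cpp above for the two supported data-type sets, so the Float and Uint8 aliases can be constructed by code that only includes this header. A hedged usage sketch (the factory function is hypothetical; descriptor and info come from the caller):

    #include <memory>

    std::unique_ptr<armnn::IWorkload> MakeClPermuteFloatWorkload(const armnn::PermuteQueueDescriptor& descriptor,
                                                                 const armnn::WorkloadInfo& info)
    {
        // Only the permutations accepted by ClPermuteWorkloadValidate
        // ([0,3,1,2], [0,2,3,1] and [3,2,0,1]) should reach this point.
        return std::make_unique<armnn::ClPermuteFloatWorkload>(descriptor, info);
    }
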
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp b/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp
deleted file mode 100644
index a1ee50b39f..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClPooling2dBaseWorkload.hpp"
-#include "backends/ClLayerSupport.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/ArmComputeUtils.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor)
-{
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);
-
- return arm_compute::CLPoolingLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo);
-}
-
-template <armnn::DataType... dataTypes>
-ClPooling2dBaseWorkload<dataTypes...>::ClPooling2dBaseWorkload(
- const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info, const std::string& name)
- : TypedWorkload<Pooling2dQueueDescriptor, dataTypes...>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs(name, 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters);
-
- // Run the layer.
- m_PoolingLayer.configure(&input, &output, layerInfo);
-}
-
-template class ClPooling2dBaseWorkload<DataType::Float16, DataType::Float32>;
-template class ClPooling2dBaseWorkload<DataType::QuantisedAsymm8>;
-
-}
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp b/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp
deleted file mode 100644
index ea7ddfb41b..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor);
-
-// Base class template providing an implementation of the Pooling2d layer common to all data types.
-template <armnn::DataType... dataTypes>
-class ClPooling2dBaseWorkload : public TypedWorkload<Pooling2dQueueDescriptor, dataTypes...>
-{
-public:
- using TypedWorkload<Pooling2dQueueDescriptor, dataTypes...>::m_Data;
-
- ClPooling2dBaseWorkload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info,
- const std::string& name);
-
-protected:
- mutable arm_compute::CLPoolingLayer m_PoolingLayer;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.cpp
deleted file mode 100644
index dc9d17f0ae..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClPooling2dFloatWorkload.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClPooling2dFloatWorkload::ClPooling2dFloatWorkload(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : ClPooling2dBaseWorkload<DataType::Float16, DataType::Float32>(descriptor, info, "ClPooling2dFloatWorkload")
-{
-}
-
-void ClPooling2dFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dFloatWorkload_Execute");
- m_PoolingLayer.run();
-}
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.hpp
deleted file mode 100644
index 71648d40f4..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPooling2dFloatWorkload.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include "ClPooling2dBaseWorkload.hpp"
-
-namespace armnn
-{
-class ClPooling2dFloatWorkload : public ClPooling2dBaseWorkload<DataType::Float16, DataType::Float32>
-{
-public:
- ClPooling2dFloatWorkload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp
deleted file mode 100644
index 0b4b15f806..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClPooling2dUint8Workload.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClPooling2dUint8Workload::ClPooling2dUint8Workload(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : ClPooling2dBaseWorkload<DataType::QuantisedAsymm8>(descriptor, info, "ClPooling2dUint8Workload")
-{
-}
-
-void ClPooling2dUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClPooling2dUint8Workload_Execute");
- m_PoolingLayer.run();
-}
-
-} //namespace armnn
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp
deleted file mode 100644
index 2baf2aa708..0000000000
--- a/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include "ClPooling2dBaseWorkload.hpp"
-
-namespace armnn
-{
-
-class ClPooling2dUint8Workload : public ClPooling2dBaseWorkload<DataType::QuantisedAsymm8>
-{
-public:
- ClPooling2dUint8Workload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-};
-
-} //namespace armnn
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.cpp
deleted file mode 100644
index ea50436a66..0000000000
--- a/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClReshapeFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClReshapeFloatWorkload::ClReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info)
- : FloatWorkload<ReshapeQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClReshapeFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input, &output);
-}
-
-void ClReshapeFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeFloatWorkload_Execute");
- m_Layer.run();
-}
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.hpp
deleted file mode 100644
index 48265143e5..0000000000
--- a/src/armnn/backends/ClWorkloads/ClReshapeFloatWorkload.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClReshapeFloatWorkload : public FloatWorkload<ReshapeQueueDescriptor>
-{
-public:
- ClReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- mutable arm_compute::CLReshapeLayer m_Layer;
-};
-
-} //namespace armnn
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp
deleted file mode 100644
index 82bd93ef9c..0000000000
--- a/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClReshapeUint8Workload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-ClReshapeUint8Workload::ClReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info)
- : Uint8Workload<ReshapeQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClReshapeUint8Workload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input, &output);
-}
-
-void ClReshapeUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClReshapeUint8Workload_Execute");
-
- m_Layer.run();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp
deleted file mode 100644
index c9801a3ae1..0000000000
--- a/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-// Reshape
-class ClReshapeUint8Workload : public Uint8Workload<ReshapeQueueDescriptor>
-{
-public:
- ClReshapeUint8Workload( const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- mutable arm_compute::CLReshapeLayer m_Layer;
-};
-
-} //namespace armnn
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.cpp
deleted file mode 100644
index 8348afb76a..0000000000
--- a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClResizeBilinearFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ClLayerSupport.hpp"
-#include "backends/ArmComputeUtils.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("ClResizeBilinearFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_ResizeBilinearLayer.configure(&input, &output, arm_compute::InterpolationPolicy::BILINEAR,
- arm_compute::BorderMode::REPLICATE, arm_compute::PixelValue(0.f),
- arm_compute::SamplingPolicy::TOP_LEFT);
-};
-
-void ClResizeBilinearFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
- m_ResizeBilinearLayer.run();
-}
-
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.hpp
deleted file mode 100644
index f2ee67f5dd..0000000000
--- a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloatWorkload.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClResizeBilinearFloatWorkload : public FloatWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
- ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLScale m_ResizeBilinearLayer;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.cpp b/src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.cpp
deleted file mode 100644
index b4ea236d49..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSoftmaxBaseWorkload.hpp"
-
-#include "backends/ArmComputeTensorUtils.hpp"
-
-#include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output)
-{
- // NOTE: We report 4D Softmax as unsupported until full support is added to ACL
- if(input.GetShape().GetNumDimensions() >= 4u)
- {
- return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "4d softmax is not supported");
- }
-
- const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- return arm_compute::CLSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo);
-}
-
-}
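
ClSoftmaxWorkloadValidate is the query half of softmax support on CL: it converts the Arm NN TensorInfos to ACL tensor descriptors and defers to CLSoftmaxLayer::validate. A sketch of how layer-support code typically consumes it, mirroring the bool/reason shape used by ClSubtractionValidate later in this diff (the wrapper name below is hypothetical and not part of the diff):

    // Sketch only: translating the ACL status into a bool plus human-readable reason.
    #include "ClSoftmaxBaseWorkload.hpp"
    #include <string>

    bool IsSoftmaxSupportedOnCl(const armnn::TensorInfo& input,
                                const armnn::TensorInfo& output,
                                std::string* reasonIfUnsupported)  // hypothetical helper
    {
        const arm_compute::Status status = armnn::ClSoftmaxWorkloadValidate(input, output);
        const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
        if (!supported && reasonIfUnsupported)
        {
            *reasonIfUnsupported = status.error_description();
        }
        return supported;
    }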
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.hpp b/src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.hpp
deleted file mode 100644
index b800056cdf..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxBaseWorkload.hpp
+++ /dev/null
@@ -1,17 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Tensor.hpp>
-#include <arm_compute/core/Error.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output);
-
-} // namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.cpp
deleted file mode 100644
index c34b5a2a74..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSoftmaxFloatWorkload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
- , m_SoftmaxLayer(memoryManager)
-{
- m_Data.ValidateInputsOutputs("ClSoftmaxFloatWorkload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
-}
-
-void ClSoftmaxFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
- m_SoftmaxLayer.run();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.hpp
deleted file mode 100644
index 965b845cf8..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxFloatWorkload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include "arm_compute/runtime/MemoryManagerOnDemand.h"
-
-#include <memory>
-
-namespace armnn
-{
-
-class ClSoftmaxFloatWorkload : public FloatWorkload<SoftmaxQueueDescriptor>
-{
-public:
- ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
- void Execute() const override;
-
-private:
- mutable arm_compute::CLSoftmaxLayer m_SoftmaxLayer;
-};
-
-} //namespace armnn
-
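
Both softmax workloads take a shared arm_compute::MemoryManagerOnDemand so their internal tensors can be pooled with other layers. A minimal sketch of building one follows; it assumes ACL's BlobLifetimeManager and PoolManager, which is one common choice and is not dictated by this diff:

    // Sketch: creating the memory manager the softmax workloads share (illustrative only).
    #include <arm_compute/runtime/BlobLifetimeManager.h>
    #include <arm_compute/runtime/MemoryManagerOnDemand.h>
    #include <arm_compute/runtime/PoolManager.h>
    #include <memory>

    std::shared_ptr<arm_compute::MemoryManagerOnDemand> CreateClMemoryManager()
    {
        auto lifetimeManager = std::make_shared<arm_compute::BlobLifetimeManager>();
        auto poolManager     = std::make_shared<arm_compute::PoolManager>();
        return std::make_shared<arm_compute::MemoryManagerOnDemand>(lifetimeManager, poolManager);
    }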
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp
deleted file mode 100644
index 1bb9628d74..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSoftmaxUint8Workload.hpp"
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
- , m_SoftmaxLayer(memoryManager)
-{
- m_Data.ValidateInputsOutputs("ClSoftmaxUint8Workload", 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- const auto outputQuantization = output.info()->quantization_info();
-
- if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
- {
- throw InvalidArgumentException(
- "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
- }
-
- m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta);
-}
-
-void ClSoftmaxUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");
-
- m_SoftmaxLayer.run();
-}
-
-} //namespace armnn
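
The constructor above rejects any output quantization other than scale = 1/256 with offset = 0: softmax produces values in [0, 1], and that pair maps the interval onto the full uint8 range (real value = scale * (quantized - offset), so 0 decodes to 0.0f and 255 to roughly 0.996f). A sketch of an output TensorInfo that satisfies the check; the shape is arbitrary and for illustration only:

    // Sketch: quantization parameters accepted by ClSoftmaxUint8Workload.
    #include <armnn/Tensor.hpp>

    armnn::TensorInfo MakeSoftmaxUint8OutputInfo()
    {
        armnn::TensorInfo outputInfo({ 1, 10 }, armnn::DataType::QuantisedAsymm8);  // example shape
        outputInfo.SetQuantizationScale(1.0f / 256.0f);  // softmax output lies in [0, 1]
        outputInfo.SetQuantizationOffset(0);             // any other pair throws InvalidArgumentException above
        return outputInfo;
    }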
diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp
deleted file mode 100644
index 29427a5976..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include "arm_compute/runtime/MemoryManagerOnDemand.h"
-
-#include <memory>
-
-namespace armnn
-{
-// Softmax
-class ClSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor>
-{
-public:
- ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
-
- void Execute() const override;
-private:
-
- mutable arm_compute::CLSoftmaxLayer m_SoftmaxLayer;
-};
-
-} //namespace armnn
-
diff --git a/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.cpp
deleted file mode 100644
index 5fd634bdb6..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSplitterFloatWorkload.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void ClSplitterFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSplitterFloatWorkload_Execute");
- ClBaseSplitterWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.hpp b/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.hpp
deleted file mode 100644
index a0b5846f8e..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSplitterFloatWorkload.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClBaseSplitterWorkload.hpp"
-
-namespace armnn
-{
-
-class ClSplitterFloatWorkload : public ClBaseSplitterWorkload<DataType::Float16, DataType::Float32>
-{
-public:
- using ClBaseSplitterWorkload<DataType::Float16, DataType::Float32>::ClBaseSplitterWorkload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp
deleted file mode 100644
index 50a251ada7..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSplitterUint8Workload.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void ClSplitterUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSplitterUint8Workload_Execute");
- ClBaseSplitterWorkload::Execute();
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp
deleted file mode 100644
index 19e8be5034..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClBaseSplitterWorkload.hpp"
-
-namespace armnn
-{
-class ClSplitterUint8Workload : public ClBaseSplitterWorkload<DataType::QuantisedAsymm8>
-{
-public:
- using ClBaseSplitterWorkload<DataType::QuantisedAsymm8>::ClBaseSplitterWorkload;
- virtual void Execute() const override;
-};
-} //namespace armnn
-
-
-
diff --git a/src/armnn/backends/ClWorkloads/ClSubtractionWorkload.cpp b/src/armnn/backends/ClWorkloads/ClSubtractionWorkload.cpp
deleted file mode 100644
index 1c70130fa4..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSubtractionWorkload.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSubtractionWorkload.hpp"
-
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
-
-template <armnn::DataType... T>
-ClSubtractionWorkload<T...>::ClSubtractionWorkload(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : TypedWorkload<SubtractionQueueDescriptor, T...>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("ClSubtractionWorkload", 2, 1);
-
- arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
-}
-
-template <armnn::DataType... T>
-void ClSubtractionWorkload<T...>::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionWorkload_Execute");
- m_Layer.run();
-}
-
-bool ClSubtractionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
-{
- const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
- const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- const arm_compute::Status aclStatus = arm_compute::CLArithmeticSubtraction::validate(&aclInput0Info,
- &aclInput1Info,
- &aclOutputInfo,
- g_AclConvertPolicy);
-
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return supported;
-}
-
-} //namespace armnn
-
-template class armnn::ClSubtractionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>;
-template class armnn::ClSubtractionWorkload<armnn::DataType::QuantisedAsymm8>;
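
ClSubtractionValidate lets callers ask ACL whether a subtraction is supported before committing to a workload, and the explicit instantiations at the end of the file provide the float (FP16/FP32) and uint8 variants the factory links against. A sketch of gating creation on the validate call; it is illustrative only and the wrapper function name is made up:

    // Sketch: validate first, then construct the float variant instantiated above.
    #include "ClSubtractionWorkload.hpp"
    #include <armnn/Exceptions.hpp>
    #include <memory>
    #include <string>

    std::unique_ptr<armnn::IWorkload> MakeClSubtraction(const armnn::SubtractionQueueDescriptor& descriptor,
                                                        const armnn::WorkloadInfo& info)
    {
        std::string reason;
        if (!armnn::ClSubtractionValidate(info.m_InputTensorInfos[0],
                                          info.m_InputTensorInfos[1],
                                          info.m_OutputTensorInfos[0],
                                          &reason))
        {
            throw armnn::InvalidArgumentException("CL subtraction not supported: " + reason);
        }
        return std::make_unique<armnn::ClSubtractionWorkload<armnn::DataType::Float16,
                                                             armnn::DataType::Float32>>(descriptor, info);
    }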
diff --git a/src/armnn/backends/ClWorkloads/ClSubtractionWorkload.hpp b/src/armnn/backends/ClWorkloads/ClSubtractionWorkload.hpp
deleted file mode 100644
index 59a5f01e73..0000000000
--- a/src/armnn/backends/ClWorkloads/ClSubtractionWorkload.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-template <armnn::DataType... dataTypes>
-class ClSubtractionWorkload : public TypedWorkload<SubtractionQueueDescriptor, dataTypes...>
-{
-public:
- ClSubtractionWorkload(const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- mutable arm_compute::CLArithmeticSubtraction m_Layer;
-};
-
-bool ClSubtractionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
-} //namespace armnn
diff --git a/src/armnn/backends/ClWorkloads/ClWorkloadUtils.hpp b/src/armnn/backends/ClWorkloads/ClWorkloadUtils.hpp
deleted file mode 100644
index 6f1b155745..0000000000
--- a/src/armnn/backends/ClWorkloads/ClWorkloadUtils.hpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "OpenClTimer.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-#include "backends/CpuTensorHandle.hpp"
-
-#include <Half.hpp>
-
-#define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
- ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
- name, \
- armnn::OpenClTimer(), \
- armnn::WallClockTimer())
-
-namespace armnn
-{
-
-template <typename T>
-void CopyArmComputeClTensorData(const T* srcData, arm_compute::CLTensor& dstTensor)
-{
- {
- ARMNN_SCOPED_PROFILING_EVENT_CL("MapClTensorForWriting");
- dstTensor.map(true);
- }
-
- {
- ARMNN_SCOPED_PROFILING_EVENT_CL("CopyToClTensor");
- armcomputetensorutils::CopyArmComputeITensorData<T>(srcData, dstTensor);
- }
-
- dstTensor.unmap();
-}
-
-template <typename T>
-void InitialiseArmComputeClTensorData(arm_compute::CLTensor& clTensor, const T* data)
-{
- armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
- CopyArmComputeClTensorData<T>(data, clTensor);
-}
-
-inline void InitializeArmComputeClTensorDataForFloatTypes(arm_compute::CLTensor& clTensor,
- const ConstCpuTensorHandle *handle)
-{
- BOOST_ASSERT(handle);
- switch(handle->GetTensorInfo().GetDataType())
- {
- case DataType::Float16:
- InitialiseArmComputeClTensorData(clTensor, handle->GetConstTensor<armnn::Half>());
- break;
- case DataType::Float32:
- InitialiseArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
- break;
- default:
- BOOST_ASSERT_MSG(false, "Unexpected floating point type.");
- }
-};
-
-} //namespace armnn
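
The helpers above are how workloads with constant data (for example convolution weights and biases) push that data into a CL tensor: allocate the buffer, map it for writing, copy the host data, unmap, with each step under its own profiling scope. A hedged sketch of a typical call site; the function and parameter names are placeholders and not part of the diff:

    // Sketch: uploading a constant weights tensor with the helpers above (illustrative only).
    #include "ClWorkloadUtils.hpp"

    void UploadConstantWeights(arm_compute::CLTensor& aclWeights,
                               const armnn::ConstCpuTensorHandle* weightsHandle)  // assumed to hold Float16 or Float32 data
    {
        // Allocates the CL buffer, maps it for writing, copies the host data and unmaps it,
        // dispatching on the handle's data type (Float16 vs Float32) as shown above.
        armnn::InitializeArmComputeClTensorDataForFloatTypes(aclWeights, weightsHandle);
    }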