about | summary | refs | log | tree | commit | diff
path: root/src/backends/cl/workloads/ClMaximumWorkload.cpp
diff options
context:
space:
mode:
author:    keidav01 <keith.davis@arm.com>  2018-12-19 10:04:58 +0000
committer: Les Bell <les.bell@arm.com>  2018-12-20 15:46:49 +0000
commit:    a959ee5f7753c073d62b0bf16ae86d18ce6fd8cc (patch)
tree:      52871e1c69d42dd90ef32e7d328448b040f07686 /src/backends/cl/workloads/ClMaximumWorkload.cpp
parent:    84c70e65a193aa5faa959d305af82783fa8f78b5 (diff)
download:  armnn-a959ee5f7753c073d62b0bf16ae86d18ce6fd8cc.tar.gz
IVGCVSW-2211 Maximum operator support (CL)
* Added ClMaximumWorkload implementation
* Added CL unit tests

Change-Id: I922c83fe25d17be21a5d0f4e861038463cb09789
Diffstat (limited to 'src/backends/cl/workloads/ClMaximumWorkload.cpp')
-rw-r--r--  src/backends/cl/workloads/ClMaximumWorkload.cpp | 58
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/src/backends/cl/workloads/ClMaximumWorkload.cpp b/src/backends/cl/workloads/ClMaximumWorkload.cpp
new file mode 100644
index 0000000000..cd3192d186
--- /dev/null
+++ b/src/backends/cl/workloads/ClMaximumWorkload.cpp
@@ -0,0 +1,58 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClMaximumWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <cl/ClLayerSupport.hpp>
+#include <cl/ClTensorHandle.hpp>
+#include <cl/ClLayerSupport.hpp>
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+// Checks whether the CL backend can execute an element-wise Maximum with the
+// given tensor descriptions. This is a pure capability query: it converts the
+// Arm NN TensorInfos to Arm Compute Library TensorInfos and delegates to
+// CLElementwiseMax::validate, so no CL resources are configured or allocated.
+//
+// @param input0 Description of the first input tensor.
+// @param input1 Description of the second input tensor.
+// @param output Description of the output tensor.
+// @return The arm_compute::Status produced by CLElementwiseMax::validate;
+//         an error status means the configuration is unsupported.
+arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo& input0,
+                                              const TensorInfo& input1,
+                                              const TensorInfo& output)
+{
+    // Translate Arm NN tensor metadata into the ACL representation.
+    const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    const arm_compute::Status aclStatus = arm_compute::CLElementwiseMax::validate(&aclInput0Info,
+                                                                                 &aclInput1Info,
+                                                                                 &aclOutputInfo);
+
+    return aclStatus;
+}
+
+// Constructs the workload: verifies the descriptor carries exactly two inputs
+// and one output, then configures the ACL CLElementwiseMax function against
+// the underlying CL tensor handles. Configuration happens once here; the
+// function is re-run on each Execute() call.
+//
+// @param descriptor Queue descriptor holding the input/output tensor handles.
+// @param info       Workload info forwarded to the BaseWorkload.
+ClMaximumWorkload::ClMaximumWorkload(const MaximumQueueDescriptor& descriptor,
+                                     const WorkloadInfo& info)
+    : BaseWorkload<MaximumQueueDescriptor>(descriptor, info)
+{
+    // Maximum is binary: exactly 2 inputs and 1 output are required.
+    m_Data.ValidateInputsOutputs("ClMaximumWorkload", 2, 1);
+
+    // The handles are created by the CL backend, so the static_casts are
+    // expected to be safe here — NOTE(review): no dynamic check is performed.
+    arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_MaximumLayer.configure(&input0, &input1, &output);
+}
+
+// Runs the previously configured CLElementwiseMax function. The scoped macro
+// records a profiling event for this execution; RunClFunction wraps the ACL
+// call and reports the call site on failure via CHECK_LOCATION().
+void ClMaximumWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClMaximumWorkload_Execute");
+    RunClFunction(m_MaximumLayer, CHECK_LOCATION());
+}
+
+} //namespace armnn