diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2019-05-31 09:09:44 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-05-31 08:56:20 +0000 |
commit | 20ec2497c4059e39f43a38ceb2ba99f1a7b760f1 (patch) | |
tree | 55030b657bb7e170af274df2dc9428cdc63e81a8 /src/backends/cl/workloads/ClQuantizeWorkload.cpp | |
parent | fabc289e7a371d5a3d564bed0d373da26f718ab3 (diff) | |
download | armnn-20ec2497c4059e39f43a38ceb2ba99f1a7b760f1.tar.gz |
IVGCVSW-3186 Add ClQuantizeWorkload
* Added ClQuantizeWorkload to enable quantization on CL backend
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Id49d5ec29514f6f853c2500a34b1a12444c49168
Diffstat (limited to 'src/backends/cl/workloads/ClQuantizeWorkload.cpp')
-rw-r--r-- | src/backends/cl/workloads/ClQuantizeWorkload.cpp | 47 |
1 file changed, 47 insertions, 0 deletions
diff --git a/src/backends/cl/workloads/ClQuantizeWorkload.cpp b/src/backends/cl/workloads/ClQuantizeWorkload.cpp new file mode 100644 index 0000000000..230e346a00 --- /dev/null +++ b/src/backends/cl/workloads/ClQuantizeWorkload.cpp @@ -0,0 +1,47 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ClQuantizeWorkload.hpp" +#include "ClWorkloadUtils.hpp" + +#include <aclCommon/ArmComputeUtils.hpp> +#include <aclCommon/ArmComputeTensorUtils.hpp> + +#include <backendsCommon/CpuTensorHandle.hpp> + +#include <cl/ClLayerSupport.hpp> +#include <cl/ClTensorHandle.hpp> +#include <cl/ClLayerSupport.hpp> + +namespace armnn +{ +using namespace armcomputetensorutils; + +arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo& input, + const TensorInfo& output) +{ + const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output); + + return arm_compute::CLQuantizationLayer::validate(&aclInputInfo, + &aclOutputInfo); +} + +ClQuantizeWorkload::ClQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<QuantizeQueueDescriptor>(descriptor, info) +{ + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_Layer.configure(&input, &output); +} + +void ClQuantizeWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT_CL("ClQuantizeWorkload_Execute"); + RunClFunction(m_Layer, CHECK_LOCATION()); +} + +} //namespace armnn |