From e03802edd37229a1868bacedd7571cc443810caf Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Mon, 11 Mar 2019 12:20:20 +0000 Subject: COMPMID-1936: Add support for QASYMM8 in CLQuantizeLayer. Change-Id: I9aa1f1f1753bcdee6a74ec15b4fb366f823788b4 Signed-off-by: Usama Arif Reviewed-on: https://review.mlplatform.org/c/850 Reviewed-by: Georgios Pinitas Tested-by: Arm Jenkins --- src/runtime/CL/functions/CLQuantizationLayer.cpp | 52 +++++------------------- 1 file changed, 10 insertions(+), 42 deletions(-) (limited to 'src/runtime/CL') diff --git a/src/runtime/CL/functions/CLQuantizationLayer.cpp b/src/runtime/CL/functions/CLQuantizationLayer.cpp index a13859cda3..df10e1e748 100644 --- a/src/runtime/CL/functions/CLQuantizationLayer.cpp +++ b/src/runtime/CL/functions/CLQuantizationLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -21,54 +21,22 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ - #include "arm_compute/runtime/CL/functions/CLQuantizationLayer.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/runtime/CL/CLScheduler.h" - -using namespace arm_compute; +#include "arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h" +#include "support/ToolchainSupport.h" -CLQuantizationLayer::CLQuantizationLayer() - : _quantize_kernel(), _min_max_kernel(), _min_max() +namespace arm_compute { -} - -Status CLQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - - TensorInfo min_max{ input->num_channels(), input->data_type() }; - ARM_COMPUTE_RETURN_ON_ERROR(CLMinMaxLayerKernel::validate(input, &min_max)); - ARM_COMPUTE_RETURN_ON_ERROR(CLQuantizationLayerKernel::validate(input, output, &min_max)); - - return Status{}; -} - void CLQuantizationLayer::configure(const ICLTensor *input, ICLTensor *output) { - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Configure min-max kernel. _min_max tensor will be auto-configured within the kernel. - _min_max_kernel.configure(input, &_min_max); - - // Configure quantize kernel - _quantize_kernel.configure(input, output, &_min_max); - - // Allocate min_max tensor - _min_max.allocator()->allocate(); + auto k = arm_compute::support::cpp14::make_unique<CLQuantizationLayerKernel>(); + k->configure(input, output); + _kernel = std::move(k); } -void CLQuantizationLayer::run() +Status CLQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output) { - cl::CommandQueue q = CLScheduler::get().queue(); - - // Reset min and max - _min_max_kernel.reset(q); - - // Run min-max kernel - CLScheduler::get().enqueue(_min_max_kernel, false); - - // Run quantize kernel - CLScheduler::get().enqueue(_quantize_kernel, false); + return CLQuantizationLayerKernel::validate(input, output); } +} // namespace arm_compute -- cgit v1.2.1