about summary refs log tree commit diff
path: root/src/runtime/CL
diff options
context:
space:
mode:
author	Usama Arif <usama.arif@arm.com>	2019-03-11 12:20:20 +0000
committer	Pablo Marquez <pablo.tello@arm.com>	2019-03-14 10:37:30 +0000
commit	e03802edd37229a1868bacedd7571cc443810caf (patch)
tree	018d294c4b55a64bc0fa579f5c011baeb2aaa6a4 /src/runtime/CL
parent	917959c88361e8148696c156453f69c6ae0c95c0 (diff)
download	ComputeLibrary-e03802edd37229a1868bacedd7571cc443810caf.tar.gz
COMPMID-1936: Add support for QASYMM8 in CLQuantizeLayer.
Change-Id: I9aa1f1f1753bcdee6a74ec15b4fb366f823788b4
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/850
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/CL')
-rw-r--r--	src/runtime/CL/functions/CLQuantizationLayer.cpp | 52
1 file changed, 10 insertions(+), 42 deletions(-)
diff --git a/src/runtime/CL/functions/CLQuantizationLayer.cpp b/src/runtime/CL/functions/CLQuantizationLayer.cpp
index a13859cda3..df10e1e748 100644
--- a/src/runtime/CL/functions/CLQuantizationLayer.cpp
+++ b/src/runtime/CL/functions/CLQuantizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,54 +21,22 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
#include "arm_compute/runtime/CL/functions/CLQuantizationLayer.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-
-using namespace arm_compute;
+#include "arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h"
+#include "support/ToolchainSupport.h"
-CLQuantizationLayer::CLQuantizationLayer()
- : _quantize_kernel(), _min_max_kernel(), _min_max()
+namespace arm_compute
{
-}
-
-Status CLQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-
- TensorInfo min_max{ input->num_channels(), input->data_type() };
- ARM_COMPUTE_RETURN_ON_ERROR(CLMinMaxLayerKernel::validate(input, &min_max));
- ARM_COMPUTE_RETURN_ON_ERROR(CLQuantizationLayerKernel::validate(input, output, &min_max));
-
- return Status{};
-}
-
void CLQuantizationLayer::configure(const ICLTensor *input, ICLTensor *output)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Configure min-max kernel. _min_max tensor will be auto-configured within the kernel.
- _min_max_kernel.configure(input, &_min_max);
-
- // Configure quantize kernel
- _quantize_kernel.configure(input, output, &_min_max);
-
- // Allocate min_max tensor
- _min_max.allocator()->allocate();
+ auto k = arm_compute::support::cpp14::make_unique<CLQuantizationLayerKernel>();
+ k->configure(input, output);
+ _kernel = std::move(k);
}
-void CLQuantizationLayer::run()
+Status CLQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- cl::CommandQueue q = CLScheduler::get().queue();
-
- // Reset min and max
- _min_max_kernel.reset(q);
-
- // Run min-max kernel
- CLScheduler::get().enqueue(_min_max_kernel, false);
-
- // Run quantize kernel
- CLScheduler::get().enqueue(_quantize_kernel, false);
+ return CLQuantizationLayerKernel::validate(input, output);
}
+} // namespace arm_compute