author    Manuel Bottini <manuel.bottini@arm.com>  2020-04-08 10:15:51 +0100
committer Manuel Bottini <manuel.bottini@arm.com>  2020-04-17 14:10:38 +0000
commit    4c6bd514a8d424a29b776754f1b3426fa3a8c339 (patch)
tree      ed20ea238ae069ab138b62ea6879e0c1b955b606 /src/core
parent    d2f6d96cdc2ad2169c4abd0a8c4884f61ed6d186 (diff)
download  ComputeLibrary-4c6bd514a8d424a29b776754f1b3426fa3a8c339.tar.gz
COMPMID-3280: Make all ML primitives for CL use the new interface - Part 1
- Only CLKernels have been updated

Change-Id: Ife55b847c2e39e712a186eb6ca452503d5b66937
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3001
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
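Every kernel touched by this patch follows the same mechanical pattern: the existing configure() overload is kept for backward compatibility and forwards to a new overload that takes a CLCompileContext, and kernel creation switches from CLKernelLibrary::get().create_kernel() to the create_kernel() helper that takes the compile context. The sketch below illustrates that pattern on a made-up CLExampleKernel; argument lists, validation, and build-option plumbing are abbreviated, so treat it as the shape of the change rather than code from the patch.

// Illustrative sketch only. "CLExampleKernel" is hypothetical, not a
// ComputeLibrary class; real kernels keep their full argument lists.

// Legacy overload: forwards to the new overload using the library's
// default compile context, so existing callers keep working unchanged.
void CLExampleKernel::configure(const ICLTensor *input, ICLTensor *output)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, output);
}

// New overload: the caller supplies the CLCompileContext explicitly, and
// kernel creation goes through the create_kernel() helper instead of
// CLKernelLibrary::get().create_kernel().
void CLExampleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
    // ... argument validation and build-option setup, unchanged ...
    _kernel = create_kernel(compile_context, "example_kernel", build_opts.options());
    // ... kernel window configuration, unchanged ...
}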
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLAccumulateKernel.cpp | 23
-rw-r--r--  src/core/CL/kernels/CLActivationLayerKernel.cpp | 12
-rw-r--r--  src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLBitwiseAndKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLBitwiseNotKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLBitwiseOrKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLBitwiseXorKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLBox3x3Kernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLCannyEdgeKernel.cpp | 22
-rw-r--r--  src/core/CL/kernels/CLChannelCombineKernel.cpp | 16
-rw-r--r--  src/core/CL/kernels/CLChannelExtractKernel.cpp | 16
-rw-r--r--  src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLCol2ImKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLColorConvertKernel.cpp | 28
-rw-r--r--  src/core/CL/kernels/CLComparisonKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLConvolutionKernel.cpp | 33
-rw-r--r--  src/core/CL/kernels/CLCopyKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLCropKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLDepthConvertLayerKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLDequantizationLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLDerivativeKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLDilateKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp | 12
-rw-r--r--  src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLElementwiseOperationKernel.cpp | 24
-rw-r--r--  src/core/CL/kernels/CLErodeKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLFFTDigitReverseKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLFFTRadixStageKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLFFTScaleKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLFastCornersKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLFillBorderKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLFlattenLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLGatherKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLGaussian3x3Kernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLGaussian5x5Kernel.cpp | 16
-rw-r--r--  src/core/CL/kernels/CLGaussianPyramidKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLHOGDescriptorKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLHOGDetectorKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLHarrisCornersKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLHistogramKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLIm2ColKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLIntegralImageKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLLKTrackerKernel.cpp | 37
-rw-r--r--  src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLMagnitudePhaseKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLMeanStdDevKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLMedian3x3Kernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLMemsetKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLMinMaxLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLMinMaxLocationKernel.cpp | 15
-rw-r--r--  src/core/CL/kernels/CLNonLinearFilterKernel.cpp | 11
-rw-r--r--  src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLNormalizationLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLPadLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLPermuteKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp | 15
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp | 11
-rw-r--r--  src/core/CL/kernels/CLPriorBoxLayerKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLQuantizationLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLROIAlignLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLROIPoolingLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLRangeKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLReductionOperationKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLRemapKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLReorgLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLReshapeLayerKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLReverseKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLScaleKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLScharr3x3Kernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLSelectKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLSobel3x3Kernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLSobel5x5Kernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLSobel7x7Kernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLSoftmaxLayerKernel.cpp | 14
-rw-r--r--  src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp | 16
-rw-r--r--  src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLStackLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLStridedSliceKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLTableLookupKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLThresholdKernel.cpp | 10
-rw-r--r--  src/core/CL/kernels/CLTileKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLTransposeKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLUpsampleLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLWarpAffineKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLWarpPerspectiveKernel.cpp | 9
-rw-r--r--  src/core/CL/kernels/CLWeightsReshapeKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLWinogradInputTransformKernel.cpp | 7
-rw-r--r--  src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLYOLOLayerKernel.cpp | 7
134 files changed, 1108 insertions(+), 197 deletions(-)
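From a caller's point of view the legacy overload keeps working unchanged, since it forwards to the new one with the library's default compile context. A minimal usage sketch against CLBitwiseAndKernel from this patch (tensor allocation and initialization omitted, and the default compile context used for illustration; any CLCompileContext could be supplied):

// Hypothetical caller-side view of the two interfaces; input1, input2 and
// output are assumed to be already-initialized CLTensor objects.
CLBitwiseAndKernel kernel;

// Legacy call: implicitly uses CLKernelLibrary's compile context.
kernel.configure(&input1, &input2, &output);

// New call: the compile context is passed explicitly. Here we fetch the
// library default, but the point of the new interface is that a caller
// may provide its own context instead.
kernel.configure(CLKernelLibrary::get().get_compile_context(), &input1, &input2, &output);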
diff --git a/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp b/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp
index 557046e831..52ca1d1710 100644
--- a/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp
+++ b/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,11 @@ CLAbsoluteDifferenceKernel::CLAbsoluteDifferenceKernel()
void CLAbsoluteDifferenceKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
+}
+
+void CLAbsoluteDifferenceKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
@@ -63,7 +68,7 @@ void CLAbsoluteDifferenceKernel::configure(const ICLTensor *input1, const ICLTen
build_opts.insert("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("absdiff", build_opts));
+ _kernel = create_kernel(compile_context, "absdiff", build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
diff --git a/src/core/CL/kernels/CLAccumulateKernel.cpp b/src/core/CL/kernels/CLAccumulateKernel.cpp
index 12ee210243..aa13b4a207 100644
--- a/src/core/CL/kernels/CLAccumulateKernel.cpp
+++ b/src/core/CL/kernels/CLAccumulateKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,11 +40,16 @@ constexpr unsigned int num_elems_processed_per_iteration = 16;
void CLAccumulateKernel::configure(const ICLTensor *input, ICLTensor *accum)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, accum);
+}
+
+void CLAccumulateKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::S16);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("accumulate"));
+ _kernel = create_kernel(compile_context, "accumulate");
// Make sure _kernel is initialized before calling the parent's configure
ICLSimple2DKernel::configure(input, accum, num_elems_processed_per_iteration);
@@ -52,12 +57,17 @@ void CLAccumulateKernel::configure(const ICLTensor *input, ICLTensor *accum)
void CLAccumulateWeightedKernel::configure(const ICLTensor *input, float alpha, ICLTensor *accum)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, alpha, accum);
+}
+
+void CLAccumulateWeightedKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(alpha < 0.0 || alpha > 1.0);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("accumulate_weighted"));
+ _kernel = create_kernel(compile_context, "accumulate_weighted");
// Set static kernel arguments
unsigned int idx = 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
@@ -69,12 +79,17 @@ void CLAccumulateWeightedKernel::configure(const ICLTensor *input, float alpha,
void CLAccumulateSquaredKernel::configure(const ICLTensor *input, uint32_t shift, ICLTensor *accum)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, shift, accum);
+}
+
+void CLAccumulateSquaredKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::S16);
ARM_COMPUTE_ERROR_ON(shift > 15);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("accumulate_squared"));
+ _kernel = create_kernel(compile_context, "accumulate_squared");
// Set static kernel arguments
unsigned int idx = 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 2ab0240076..15ae8e3d04 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -115,13 +115,18 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
}
} // namespace
-CLActivationLayerKernel::CLActivationLayerKernel(CLCoreRuntimeContext *ctx)
- : _input(nullptr), _output(nullptr), _run_in_place(false), _ctx(ctx)
+CLActivationLayerKernel::CLActivationLayerKernel()
+ : _input(nullptr), _output(nullptr), _run_in_place(false)
{
}
void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, act_info);
+}
+
+void CLActivationLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
_run_in_place = (output == nullptr) || (output == input);
@@ -226,7 +231,8 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
}
// Create kernel
- _kernel = create_opencl_kernel(_ctx, kernel_name, build_opts);
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+
// Make sure _kernel is initialized before calling the parent's configure
_input = input;
_output = output;
diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
index 21709a447f..4e33744094 100644
--- a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
@@ -116,6 +116,11 @@ CLArgMinMaxLayerKernel::CLArgMinMaxLayerKernel()
void CLArgMinMaxLayerKernel::configure(const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, prev_output, output, axis, op);
+}
+
+void CLArgMinMaxLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op));
auto win_config = validate_and_configure_window(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op);
@@ -167,7 +172,7 @@ void CLArgMinMaxLayerKernel::configure(const ICLTensor *input, const ICLTensor *
default:
ARM_COMPUTE_ERROR("Not supported");
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("arg_min_max_" + kernel_axis_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, "arg_min_max_" + kernel_axis_name, build_opts.options());
// Configure kernel window
ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
diff --git a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
index e6e2cb9c4f..610d8e8f62 100644
--- a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
@@ -86,6 +86,11 @@ CLBatchConcatenateLayerKernel::CLBatchConcatenateLayerKernel()
void CLBatchConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int batch_offset, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, batch_offset, output);
+}
+
+void CLBatchConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int batch_offset, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), batch_offset, output->info()));
@@ -111,7 +116,7 @@ void CLBatchConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("concatenate", build_opts.options()));
+ _kernel = create_kernel(compile_context, "concatenate", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), batch_offset, output->info());
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index a5c8b1cc6b..8776541536 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -139,6 +139,13 @@ CLBatchNormalizationLayerKernel::CLBatchNormalizationLayerKernel()
void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
float epsilon, ActivationLayerInfo act_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, mean, var, beta, gamma, epsilon, act_info);
+}
+
+void CLBatchNormalizationLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta,
+ const ICLTensor *gamma,
+ float epsilon, ActivationLayerInfo act_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
_input = input;
@@ -169,7 +176,7 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
build_opts.add_option_if(gamma == nullptr, "-DUSE_DEFAULT_GAMMA");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batchnormalization_layer_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "batchnormalization_layer_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Set kernel static arguments
unsigned int include_output = (!_run_in_place) ? 1 : 0;
diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
index 8046213488..fbdd04c424 100644
--- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
@@ -85,6 +85,11 @@ CLBatchToSpaceLayerKernel::CLBatchToSpaceLayerKernel()
void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, output);
+}
+
+void CLBatchToSpaceLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), block_shape->info(), output->info()));
@@ -99,7 +104,7 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const ICLTenso
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(input->info()->dimension(3)));
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batch_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "batch_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
@@ -108,6 +113,11 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const ICLTenso
void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, output);
+}
+
+void CLBatchToSpaceLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
TensorShape output_shape = compute_batch_to_space_shape(input->info(), block_shape_x, block_shape_y);
@@ -127,7 +137,7 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const int32_t
build_opts.add_option("-DBLOCK_SHAPE_X=" + support::cpp11::to_string(block_shape_x));
build_opts.add_option("-DBLOCK_SHAPE_Y=" + support::cpp11::to_string(block_shape_y));
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batch_to_space_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "batch_to_space_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
diff --git a/src/core/CL/kernels/CLBitwiseAndKernel.cpp b/src/core/CL/kernels/CLBitwiseAndKernel.cpp
index 2d05f2e2bd..df23b90310 100644
--- a/src/core/CL/kernels/CLBitwiseAndKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseAndKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,6 +39,11 @@ CLBitwiseAndKernel::CLBitwiseAndKernel()
}
void CLBitwiseAndKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
+}
+
+void CLBitwiseAndKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -48,7 +53,7 @@ void CLBitwiseAndKernel::configure(const ICLTensor *input1, const ICLTensor *inp
_output = output;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("bitwise_and"));
+ _kernel = create_kernel(compile_context, "bitwise_and");
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
diff --git a/src/core/CL/kernels/CLBitwiseNotKernel.cpp b/src/core/CL/kernels/CLBitwiseNotKernel.cpp
index 0098e15ab6..2abfa46301 100644
--- a/src/core/CL/kernels/CLBitwiseNotKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseNotKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,6 +33,11 @@ using namespace arm_compute;
void CLBitwiseNotKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLBitwiseNotKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -40,7 +45,7 @@ void CLBitwiseNotKernel::configure(const ICLTensor *input, ICLTensor *output)
_output = output;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("bitwise_not"));
+ _kernel = create_kernel(compile_context, "bitwise_not");
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
diff --git a/src/core/CL/kernels/CLBitwiseOrKernel.cpp b/src/core/CL/kernels/CLBitwiseOrKernel.cpp
index b3efab8b1f..8ab509ae7f 100644
--- a/src/core/CL/kernels/CLBitwiseOrKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseOrKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,6 +40,11 @@ CLBitwiseOrKernel::CLBitwiseOrKernel()
void CLBitwiseOrKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
+}
+
+void CLBitwiseOrKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -49,7 +54,7 @@ void CLBitwiseOrKernel::configure(const ICLTensor *input1, const ICLTensor *inpu
_output = output;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("bitwise_or"));
+ _kernel = create_kernel(compile_context, "bitwise_or");
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
diff --git a/src/core/CL/kernels/CLBitwiseXorKernel.cpp b/src/core/CL/kernels/CLBitwiseXorKernel.cpp
index d8ac486d0f..c3ff7de820 100644
--- a/src/core/CL/kernels/CLBitwiseXorKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseXorKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,6 +40,11 @@ CLBitwiseXorKernel::CLBitwiseXorKernel()
void CLBitwiseXorKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
+}
+
+void CLBitwiseXorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -49,7 +54,7 @@ void CLBitwiseXorKernel::configure(const ICLTensor *input1, const ICLTensor *inp
_output = output;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("bitwise_xor"));
+ _kernel = create_kernel(compile_context, "bitwise_xor");
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
diff --git a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
index bca23c7259..5ed5523632 100644
--- a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
+++ b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
@@ -90,6 +90,11 @@ CLBoundingBoxTransformKernel::CLBoundingBoxTransformKernel()
void CLBoundingBoxTransformKernel::configure(const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), boxes, pred_boxes, deltas, info);
+}
+
+void CLBoundingBoxTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(boxes, pred_boxes, deltas);
auto_init_if_empty(*pred_boxes->info(), deltas->info()->clone()->set_data_type(boxes->info()->data_type()).set_quantization_info(boxes->info()->quantization_info()));
@@ -137,7 +142,7 @@ void CLBoundingBoxTransformKernel::configure(const ICLTensor *boxes, ICLTensor *
// Create kernel
const std::string kernel_name = (is_quantized) ? "bounding_box_transform_quantized" : "bounding_box_transform";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Since the number of columns is a multiple of 4 by definition, we don't need to pad the tensor
const unsigned int num_elems_processed_per_iteration = 4;
diff --git a/src/core/CL/kernels/CLBox3x3Kernel.cpp b/src/core/CL/kernels/CLBox3x3Kernel.cpp
index b81697f778..e0979a8aa3 100644
--- a/src/core/CL/kernels/CLBox3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLBox3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,6 +42,11 @@ BorderSize CLBox3x3Kernel::border_size() const
void CLBox3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLBox3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -56,7 +61,7 @@ void CLBox3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, bool b
};
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution3x3_static", build_opts));
+ _kernel = create_kernel(compile_context, "convolution3x3_static", build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLCannyEdgeKernel.cpp b/src/core/CL/kernels/CLCannyEdgeKernel.cpp
index 08bfe90892..c1aa611566 100644
--- a/src/core/CL/kernels/CLCannyEdgeKernel.cpp
+++ b/src/core/CL/kernels/CLCannyEdgeKernel.cpp
@@ -40,6 +40,11 @@ CLGradientKernel::CLGradientKernel()
void CLGradientKernel::configure(const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type)
{
+ configure(CLKernelLibrary::get().get_compile_context(), gx, gy, magnitude, phase, norm_type);
+}
+
+void CLGradientKernel::configure(CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gx, 1, DataType::S16, DataType::S32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gy, 1, DataType::S16, DataType::S32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(magnitude, 1, DataType::U16, DataType::U32);
@@ -61,7 +66,7 @@ void CLGradientKernel::configure(const ICLTensor *gx, const ICLTensor *gy, ICLTe
// Create kernel
const std::string kernel_name = (norm_type == 1) ? std::string("combine_gradients_L1") : std::string("combine_gradients_L2");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, built_opts));
+ _kernel = create_kernel(compile_context, kernel_name, built_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 4;
@@ -120,6 +125,11 @@ BorderSize CLEdgeNonMaxSuppressionKernel::border_size() const
void CLEdgeNonMaxSuppressionKernel::configure(const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), magnitude, phase, output, lower_thr, border_undefined);
+}
+
+void CLEdgeNonMaxSuppressionKernel::configure(CLCompileContext &compile_context, const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(magnitude, 1, DataType::U16, DataType::U32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(phase, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16, DataType::U32);
@@ -135,7 +145,7 @@ void CLEdgeNonMaxSuppressionKernel::configure(const ICLTensor *magnitude, const
// Create kernel
const std::string kernel_name = std::string("suppress_non_maximum");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, built_opts));
+ _kernel = create_kernel(compile_context, kernel_name, built_opts);
// Set minimum threshold argument
unsigned int idx = 3 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
@@ -195,6 +205,12 @@ CLEdgeTraceKernel::CLEdgeTraceKernel()
void CLEdgeTraceKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
ICLTensor *visited, ICLTensor *recorded, ICLTensor *l1_stack, ICLTensor *l1_stack_counter)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, upper_thr, lower_thr, visited, recorded, l1_stack, l1_stack_counter);
+}
+
+void CLEdgeTraceKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
+ ICLTensor *visited, ICLTensor *recorded, ICLTensor *l1_stack, ICLTensor *l1_stack_counter)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16, DataType::U32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(visited, 1, DataType::U32);
@@ -218,7 +234,7 @@ void CLEdgeTraceKernel::configure(const ICLTensor *input, ICLTensor *output, int
// Create kernel
const std::string kernel_name = std::string("hysteresis");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, built_opts));
+ _kernel = create_kernel(compile_context, kernel_name, built_opts);
// Set constant kernel args
unsigned int width = _input->info()->dimension(0);
diff --git a/src/core/CL/kernels/CLChannelCombineKernel.cpp b/src/core/CL/kernels/CLChannelCombineKernel.cpp
index d029efe110..90face2ccc 100644
--- a/src/core/CL/kernels/CLChannelCombineKernel.cpp
+++ b/src/core/CL/kernels/CLChannelCombineKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,6 +53,11 @@ CLChannelCombineKernel::CLChannelCombineKernel()
void CLChannelCombineKernel::configure(const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), plane0, plane1, plane2, plane3, output);
+}
+
+void CLChannelCombineKernel::configure(CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(plane0, plane1, plane2, output);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane0);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane1);
@@ -109,7 +114,7 @@ void CLChannelCombineKernel::configure(const ICLTensor *plane0, const ICLTensor
// Create kernel
std::string kernel_name = "channel_combine_" + string_from_format(output_format);
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Configure window
Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
@@ -136,6 +141,11 @@ void CLChannelCombineKernel::configure(const ICLTensor *plane0, const ICLTensor
void CLChannelCombineKernel::configure(const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), plane0, plane1, plane2, output);
+}
+
+void CLChannelCombineKernel::configure(CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(plane0, plane1, plane2, output);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane0);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane1);
@@ -211,7 +221,7 @@ void CLChannelCombineKernel::configure(const ICLImage *plane0, const ICLImage *p
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure window
Window win = calculate_max_window(*plane0->info(), Steps(num_elems_processed_per_iteration));
diff --git a/src/core/CL/kernels/CLChannelExtractKernel.cpp b/src/core/CL/kernels/CLChannelExtractKernel.cpp
index d2a0f984da..8df162c4ee 100644
--- a/src/core/CL/kernels/CLChannelExtractKernel.cpp
+++ b/src/core/CL/kernels/CLChannelExtractKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,6 +49,11 @@ CLChannelExtractKernel::CLChannelExtractKernel()
void CLChannelExtractKernel::configure(const ICLTensor *input, Channel channel, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, channel, output);
+}
+
+void CLChannelExtractKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_ON(input == output);
@@ -89,7 +94,7 @@ void CLChannelExtractKernel::configure(const ICLTensor *input, Channel channel,
// Create kernel
std::string kernel_name = "channel_extract_" + string_from_format(format);
std::set<std::string> build_opts = { ("-DCHANNEL_" + string_from_channel(channel)) };
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure window
Window win = calculate_max_window(*input->info(), Steps(_num_elems_processed_per_iteration));
@@ -106,6 +111,11 @@ void CLChannelExtractKernel::configure(const ICLTensor *input, Channel channel,
void CLChannelExtractKernel::configure(const ICLMultiImage *input, Channel channel, ICLImage *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, channel, output);
+}
+
+void CLChannelExtractKernel::configure(CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
@@ -151,7 +161,7 @@ void CLChannelExtractKernel::configure(const ICLMultiImage *input, Channel chann
kernel_name = "channel_extract_" + string_from_format(format);
build_opts.insert(("-DCHANNEL_" + string_from_channel(channel)));
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure window
Window win = calculate_max_window(*input_plane->info(), Steps(_num_elems_processed_per_iteration));
diff --git a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
index 27bf6b9034..5e6bbb395b 100644
--- a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
+++ b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
@@ -92,6 +92,11 @@ CLChannelShuffleLayerKernel::CLChannelShuffleLayerKernel()
void CLChannelShuffleLayerKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int num_groups)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, num_groups);
+}
+
+void CLChannelShuffleLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
_input = input;
@@ -116,7 +121,7 @@ void CLChannelShuffleLayerKernel::configure(const ICLTensor *input, ICLTensor *o
// Create kernel
std::string kernel_name = "channel_shuffle_" + lower_string(string_from_data_layout(data_layout));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info());
diff --git a/src/core/CL/kernels/CLCol2ImKernel.cpp b/src/core/CL/kernels/CLCol2ImKernel.cpp
index 643c2aaa84..d96ec96126 100644
--- a/src/core/CL/kernels/CLCol2ImKernel.cpp
+++ b/src/core/CL/kernels/CLCol2ImKernel.cpp
@@ -91,6 +91,11 @@ CLCol2ImKernel::CLCol2ImKernel()
void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, convolved_dims, num_groups);
+}
+
+void CLCol2ImKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Perform validation step
@@ -110,7 +115,7 @@ void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, const
build_opts.add_option("-DWIDTH_OUTPUT=" + support::cpp11::to_string(_convolved_dims.width));
build_opts.add_option("-DNUM_GROUPS=" + support::cpp11::to_string(num_groups));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("col2im", build_opts.options()));
+ _kernel = create_kernel(compile_context, "col2im", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info(), _convolved_dims, num_groups);
diff --git a/src/core/CL/kernels/CLColorConvertKernel.cpp b/src/core/CL/kernels/CLColorConvertKernel.cpp
index f2eb848684..720d925427 100644
--- a/src/core/CL/kernels/CLColorConvertKernel.cpp
+++ b/src/core/CL/kernels/CLColorConvertKernel.cpp
@@ -48,6 +48,11 @@ CLColorConvertKernel::CLColorConvertKernel()
void CLColorConvertKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON(input == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr);
@@ -114,7 +119,7 @@ void CLColorConvertKernel::configure(const ICLTensor *input, ICLTensor *output)
_output = output;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str()));
+ _kernel = create_kernel(compile_context, kernel_name.str());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
@@ -139,6 +144,11 @@ void CLColorConvertKernel::configure(const ICLTensor *input, ICLTensor *output)
void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLImage *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output)
+{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
ARM_COMPUTE_ERROR_ON(output == nullptr);
@@ -180,7 +190,7 @@ void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLImage *outpu
_output = output;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str()));
+ _kernel = create_kernel(compile_context, kernel_name.str());
// Configure kernel window
const bool has_two_planes = (input->info()->format() == Format::NV12) || (input->info()->format() == Format::NV21);
@@ -224,6 +234,11 @@ void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLImage *outpu
void CLColorConvertKernel::configure(const ICLImage *input, ICLMultiImage *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output)
+{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON(output == nullptr);
@@ -289,7 +304,7 @@ void CLColorConvertKernel::configure(const ICLImage *input, ICLMultiImage *outpu
_multi_output = output;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str()));
+ _kernel = create_kernel(compile_context, kernel_name.str());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
@@ -331,6 +346,11 @@ void CLColorConvertKernel::configure(const ICLImage *input, ICLMultiImage *outpu
void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLMultiImage *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output)
+{
unsigned int num_elems_processed_per_iteration = 0;
switch(input->info()->format())
{
@@ -387,7 +407,7 @@ void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLMultiImage *
float sub_sampling_input = (has_two_input_planars || (input->info()->format() == Format::IYUV)) ? 0.5f : 1;
float sub_sampling_output = (has_two_output_planars || (output->info()->format() == Format::IYUV)) ? 0.5f : 1;
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str()));
+ _kernel = create_kernel(compile_context, kernel_name.str());
Window win = calculate_max_window(*input->cl_plane(0)->info(), Steps(num_elems_processed_per_iteration));
win.set_dimension_step(Window::DimY, 2);
diff --git a/src/core/CL/kernels/CLComparisonKernel.cpp b/src/core/CL/kernels/CLComparisonKernel.cpp
index e010743bad..61aeebea5a 100644
--- a/src/core/CL/kernels/CLComparisonKernel.cpp
+++ b/src/core/CL/kernels/CLComparisonKernel.cpp
@@ -108,6 +108,11 @@ CLComparisonKernel::CLComparisonKernel()
void CLComparisonKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, operation);
+}
+
+void CLComparisonKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info(), operation));
@@ -141,7 +146,7 @@ void CLComparisonKernel::configure(const ICLTensor *input1, const ICLTensor *inp
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
ICLKernel::configure_internal(win_config.second);
diff --git a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
index 016a7bbf4a..f57ff6c07e 100644
--- a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
+++ b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
@@ -41,6 +41,12 @@ CLConvertFullyConnectedWeightsKernel::CLConvertFullyConnectedWeightsKernel()
void CLConvertFullyConnectedWeightsKernel::configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
DataLayout data_layout)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, original_input_shape, data_layout);
+}
+
+void CLConvertFullyConnectedWeightsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
+ DataLayout data_layout)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Output tensor auto initialisation if not yet initialized
@@ -70,7 +76,7 @@ void CLConvertFullyConnectedWeightsKernel::configure(const ICLTensor *input, ICL
build_opts.add_option("-DFACTOR_2=" + support::cpp11::to_string(factor_2));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convert_fc_weights", build_opts.options()));
+ _kernel = create_kernel(compile_context, "convert_fc_weights", build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
diff --git a/src/core/CL/kernels/CLConvolutionKernel.cpp b/src/core/CL/kernels/CLConvolutionKernel.cpp
index 2e1c56c3ba..3cc6d24de2 100644
--- a/src/core/CL/kernels/CLConvolutionKernel.cpp
+++ b/src/core/CL/kernels/CLConvolutionKernel.cpp
@@ -60,6 +60,12 @@ BorderSize CLConvolutionKernel<matrix_size>::border_size() const
template <unsigned int matrix_size>
void CLConvolutionKernel<matrix_size>::configure(const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, conv, scale, border_undefined);
+}
+
+template <unsigned int matrix_size>
+void CLConvolutionKernel<matrix_size>::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON(conv == nullptr);
@@ -92,7 +98,7 @@ void CLConvolutionKernel<matrix_size>::configure(const ICLTensor *input, ICLTens
out_type << "-DDATA_TYPE_OUT=" << get_cl_type_from_data_type(output->info()->data_type());
build_opts.add_option(out_type.str());
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str(), build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name.str(), build_opts.options());
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
@@ -130,6 +136,12 @@ BorderSize CLSeparableConvolutionHorKernel<matrix_size>::border_size
template <unsigned int matrix_size>
void CLSeparableConvolutionHorKernel<matrix_size>::configure(const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, conv, border_undefined);
+}
+
+template <unsigned int matrix_size>
+void CLSeparableConvolutionHorKernel<matrix_size>::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16, DataType::S16, DataType::S32);
@@ -156,7 +168,7 @@ void CLSeparableConvolutionHorKernel<matrix_size>::configure(const ICLTensor *in
// Create kernel
const std::string kernel_name = "convolution_separable1x" + support::cpp11::to_string(matrix_size) + "_static";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
@@ -200,6 +212,13 @@ template <unsigned int matrix_size>
void CLSeparableConvolutionVertKernel<matrix_size>::configure(const ICLTensor *input, ICLTensor *output,
const int16_t *conv, uint32_t scale, bool border_undefined, DataType data_type)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, conv, scale, border_undefined, data_type);
+}
+
+template <unsigned int matrix_size>
+void CLSeparableConvolutionVertKernel<matrix_size>::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
+ const int16_t *conv, uint32_t scale, bool border_undefined, DataType data_type)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16, DataType::S16, DataType::S32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON((matrix_size != 5) && (matrix_size != 7) && (matrix_size != 9));
@@ -230,7 +249,7 @@ void CLSeparableConvolutionVertKernel<matrix_size>::configure(const ICLTensor *i
// Create kernel
const std::string kernel_name = "convolution_separable" + support::cpp11::to_string(matrix_size) + "x1_static";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
@@ -281,6 +300,12 @@ BorderSize CLConvolutionRectangleKernel::border_size() const
void CLConvolutionRectangleKernel::configure(const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, conv, width, height, scale, border_undefined);
+}
+
+void CLConvolutionRectangleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale,
+ bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON(nullptr == conv);
@@ -317,7 +342,7 @@ void CLConvolutionRectangleKernel::configure(const ICLTensor *input, ICLTensor *
options.insert("-DMATRIX_WIDTH=" + support::cpp11::to_string(width));
options.insert("-DMATRIX_HEIGHT=" + support::cpp11::to_string(height));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution_rectangle", options));
+ _kernel = create_kernel(compile_context, "convolution_rectangle", options);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLCopyKernel.cpp b/src/core/CL/kernels/CLCopyKernel.cpp
index 8a7353bcc2..e59223e511 100644
--- a/src/core/CL/kernels/CLCopyKernel.cpp
+++ b/src/core/CL/kernels/CLCopyKernel.cpp
@@ -157,6 +157,11 @@ CLCopyKernel::CLCopyKernel()
void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding, Window *output_window)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, padding, output_window);
+}
+
+void CLCopyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, Window *output_window)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, output_window));
@@ -199,7 +204,7 @@ void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const Pa
}
// Build kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_tensor", build_opts.options()));
+ _kernel = create_kernel(compile_context, "copy_tensor", build_opts.options());
}
else
{
@@ -217,7 +222,7 @@ void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const Pa
}
// Build kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_pad_tensor", build_opts.options()));
+ _kernel = create_kernel(compile_context, "copy_pad_tensor", build_opts.options());
// Configure window
win_config = validate_and_configure_window_with_padding(input->info(), output->info(), padding);
diff --git a/src/core/CL/kernels/CLCropKernel.cpp b/src/core/CL/kernels/CLCropKernel.cpp
index e51861e9aa..2c17c99559 100644
--- a/src/core/CL/kernels/CLCropKernel.cpp
+++ b/src/core/CL/kernels/CLCropKernel.cpp
@@ -49,6 +49,12 @@ CLCropKernel::CLCropKernel()
void CLCropKernel::configure(const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *output_window)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, start, end, batch_index, extrapolation_value, output_window);
+}
+
+void CLCropKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value,
+ Window *output_window)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), start, end, batch_index, extrapolation_value, output_window));
@@ -86,7 +92,7 @@ void CLCropKernel::configure(const ICLTensor *input, ICLTensor *output, Coordina
build_opts.add_option_if(multi_access_x && remainder_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
build_opts.add_option_if(start.x > end.x, "-DWIDTH_FLIPPED=");
build_opts.add_option_if(start.y > end.y, "-DHEIGHT_FLIPPED=");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("crop_tensor", build_opts.options()));
+ _kernel = create_kernel(compile_context, "crop_tensor", build_opts.options());
}
Status CLCropKernel::validate(const ITensorInfo *input, const ITensorInfo *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *output_window)
diff --git a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
index ee392032ca..f92f7da37f 100644
--- a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
+++ b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
@@ -70,6 +70,12 @@ Status CLDeconvolutionLayerUpsampleKernel::validate(const ITensorInfo *input, co
void CLDeconvolutionLayerUpsampleKernel::configure(const ICLTensor *input, ICLTensor *output,
const PadStrideInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
+}
+
+void CLDeconvolutionLayerUpsampleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
+ const PadStrideInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Perform validation step
@@ -83,7 +89,7 @@ void CLDeconvolutionLayerUpsampleKernel::configure(const ICLTensor *input, ICLTe
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option(("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size())));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("deconvolution_upsample", build_opts.options()));
+ _kernel = create_kernel(compile_context, "deconvolution_upsample", build_opts.options());
constexpr unsigned int num_elems_processed_per_iteration = 1;
diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
index 2704c74ff9..68607e9fc6 100644
--- a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
+++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
@@ -116,6 +116,13 @@ CLDeconvolutionReshapeOutputKernel::CLDeconvolutionReshapeOutputKernel()
void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
const PadStrideInfo &deconv_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, input_info, weights_info, deconv_info);
+}
+
+void CLDeconvolutionReshapeOutputKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info,
+ const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), input_info, weights_info, deconv_info));
@@ -148,7 +155,7 @@ void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input, const
build_opts.add_option_if(data_layout == DataLayout::NCHW, "-DNUM_FILTERS=" + support::cpp11::to_string(filter_b));
build_opts.add_option_if(_add_bias, "-DADD_BIAS");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("deconvolution_reshape", build_opts.options()));
+ _kernel = create_kernel(compile_context, "deconvolution_reshape", build_opts.options());
ICLKernel::configure_internal(win_config.second);
// Set config_id for enabling LWS tuning
diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 99489036b4..241adb297b 100644
--- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -84,6 +84,11 @@ CLDepthConcatenateLayerKernel::CLDepthConcatenateLayerKernel()
void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int depth_offset, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, depth_offset, output);
+}
+
+void CLDepthConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int depth_offset, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), depth_offset, output->info()));
@@ -109,7 +114,7 @@ void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("concatenate", build_opts.options()));
+ _kernel = create_kernel(compile_context, "concatenate", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), depth_offset, output->info());
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
index 13687a540d..2e29dbf92a 100644
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
@@ -74,6 +74,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, C
void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift);
+}
+
+void CLDepthConvertLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Auto-initialize the output shape if not initialized (we can only auto-configure the shape; the data type must be given)
@@ -100,14 +105,14 @@ void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *out
// Create kernel
const std::string kernel_name = (input_size >= output_size) ? "convert_depth_down" : "convert_depth_up";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set shift arg
unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
_kernel.setArg(idx++, shift);
// Configure kernel
- ICLSimple3DKernel::configure(input, output, num_elems_processed_per_iteration);
+ ICLSimple2DKernel::configure(input, output, num_elems_processed_per_iteration);
// Collapse window
const Window &full_window = window();
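
The CLDepthConvertLayerKernel hunk above also shows the argument-indexing convention that later hunks rely on when calling setArg(): every tensor bound to an OpenCL kernel occupies a fixed block of kernel arguments, so static scalars are appended after those blocks. Restated as a commented sketch (shift is the kernel's scalar parameter, as in the hunk; the snippet lives inside a configure() body):

// Each 3D tensor contributes num_arguments_per_3D_tensor() OpenCL kernel
// arguments (buffer pointer, strides and offsets). With one input and one
// output tensor bound first, the first free slot for a static scalar is:
unsigned int idx = 2 * num_arguments_per_3D_tensor(); // skip the input and output blocks
_kernel.setArg(idx++, shift);                         // the scalar lands right after them
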
diff --git a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
index ad0d398c88..cd61a91ec5 100644
--- a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
@@ -67,6 +67,11 @@ CLDepthToSpaceLayerKernel::CLDepthToSpaceLayerKernel()
void CLDepthToSpaceLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t block_shape)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, block_shape);
+}
+
+void CLDepthToSpaceLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
TensorShape output_shape = compute_depth_to_space_shape(input->info(), block_shape);
@@ -87,7 +92,7 @@ void CLDepthToSpaceLayerKernel::configure(const ICLTensor *input, ICLTensor *out
build_opts.add_option("-DCHANNEL_SIZE=" + support::cpp11::to_string(input->info()->dimension(idx_channel)));
build_opts.add_option("-DBLOCK_SHAPE=" + support::cpp11::to_string(block_shape));
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depth_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "depth_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
index a9875675c5..e293fa264f 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
@@ -246,6 +246,13 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input,
const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts);
+}
+
+void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
+ const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
conv_info, depth_multiplier, act_info, dilation,
@@ -337,7 +344,7 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input,
build_opts.add_option_if(input->info()->data_type() == DataType::F16, "-DIS_F16");
build_opts.add_option_if(input->info()->data_type() == DataType::F32, "-DIS_F32");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
index 4afca2b31f..71af63a97f 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
@@ -199,6 +199,13 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts);
+}
+
+void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
+ const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
conv_info, depth_multiplier, act_info, dilation,
@@ -328,7 +335,7 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
build_opts.add_option_if(input->info()->data_type() == DataType::F32, "-DIS_F32");
ICLKernel::configure_internal(win_config.second);
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
index b4025ffdaf..45df9ed59a 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
@@ -196,6 +196,14 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const ICLTensor *input,
const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation, output_multipliers, output_shifts);
+}
+
+void CLDepthwiseConvolutionLayerNativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+ const DWCWeightsKernelInfo &dwc_weights_info,
+ const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
+ const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation,
@@ -284,7 +292,7 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const ICLTensor *input,
}
ICLKernel::configure_internal(win_config.second);
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
index f746f1feff..7e38e77107 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
@@ -88,6 +88,11 @@ CLDepthwiseConvolutionLayerReshapeWeightsKernel::CLDepthwiseConvolutionLayerResh
void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
+}
+
+void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), info));
auto win_config = validate_and_configure_window(input->info(), output->info(), info);
@@ -105,7 +110,7 @@ void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const ICLTensor
build_opts.add_option_if(info.transpose, "-DTRANSPOSE");
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_convolution_reshape_weights", build_opts.options()));
+ _kernel = create_kernel(compile_context, "depthwise_convolution_reshape_weights", build_opts.options());
}
Status CLDepthwiseConvolutionLayerReshapeWeightsKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
diff --git a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp b/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
index 5d9a8faa54..ae7489f0a8 100644
--- a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
@@ -77,6 +77,11 @@ CLDequantizationLayerKernel::CLDequantizationLayerKernel()
void CLDequantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLDequantizationLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
@@ -120,7 +125,7 @@ void CLDequantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *o
build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
}
Status CLDequantizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
diff --git a/src/core/CL/kernels/CLDerivativeKernel.cpp b/src/core/CL/kernels/CLDerivativeKernel.cpp
index bfda041104..670cde308c 100644
--- a/src/core/CL/kernels/CLDerivativeKernel.cpp
+++ b/src/core/CL/kernels/CLDerivativeKernel.cpp
@@ -50,6 +50,11 @@ BorderSize CLDerivativeKernel::border_size() const
void CLDerivativeKernel::configure(const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined);
+}
+
+void CLDerivativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
@@ -85,7 +90,7 @@ void CLDerivativeKernel::configure(const ICLTensor *input, ICLTensor *output_x,
// Create kernel
const std::string kernel_name = std::string("derivative");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
diff --git a/src/core/CL/kernels/CLDilateKernel.cpp b/src/core/CL/kernels/CLDilateKernel.cpp
index 89853d7b19..0f6879dcc8 100644
--- a/src/core/CL/kernels/CLDilateKernel.cpp
+++ b/src/core/CL/kernels/CLDilateKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,11 +38,16 @@ BorderSize CLDilateKernel::border_size() const
void CLDilateKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLDilateKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("dilate"));
+ _kernel = create_kernel(compile_context, "dilate");
_input = input;
_output = output;
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index d1dfcd9bb1..ff3d106f46 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -423,6 +423,12 @@ BorderSize CLDirectConvolutionLayerKernel::border_size() const
void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info);
+}
+
+void CLDirectConvolutionLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+ const PadStrideInfo &conv_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
_data_layout = input->info()->data_layout();
@@ -491,7 +497,7 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
kernel_name << "_f32_bifrost";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str(), build_options.options()));
+ _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
}
else
{
@@ -535,7 +541,7 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("direct_convolution_quantized", build_options.options()));
+ _kernel = create_kernel(compile_context, "direct_convolution_quantized", build_options.options());
// Set static kernel arguments
unsigned int idx = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0) + 1;
@@ -546,7 +552,7 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
else
{
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str(), build_options.options()));
+ _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
}
}
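
The direct-convolution hunks illustrate the other recurring ingredient: kernels are specialised at compile time via -D defines collected in a CLBuildOptions set, and the kernel name selects the source variant (Bifrost-tuned float, quantized, or generic) before create_kernel() compiles it against the supplied context. A condensed sketch under stated assumptions (the is_quantized flag, the HAS_BIAS define and the name suffixes are illustrative; only "direct_convolution_quantized" appears verbatim in the hunks):

#include <sstream>

// Inside a hypothetical configure(compile_context, ...) body:
CLBuildOptions build_options;
build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
build_options.add_option_if(biases != nullptr, "-DHAS_BIAS"); // define name assumed

std::stringstream kernel_name;
kernel_name << (is_quantized ? "direct_convolution_quantized" : "direct_convolution");
// The real kernels append variant suffixes such as "_f32_bifrost" here.
_kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
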
diff --git a/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp b/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp
index a1aada4990..7356c5a5cd 100644
--- a/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp
+++ b/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp
@@ -52,6 +52,11 @@ Status validate_arguments(const ITensorInfo &input, const ITensorInfo &output)
void CLElementWiseUnaryLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, op);
+}
+
+void CLElementWiseUnaryLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input->info(), *output->info()));
@@ -105,7 +110,7 @@ void CLElementWiseUnaryLayerKernel::configure(const ICLTensor *input, ICLTensor
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
}
Status CLElementWiseUnaryLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ElementWiseUnary &op)
diff --git a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
index 0f2e26f186..ee4ef40b87 100644
--- a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
+++ b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
@@ -237,6 +237,11 @@ CLElementwiseOperationKernel::CLElementwiseOperationKernel()
void CLElementwiseOperationKernel::configure_common(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
+ configure_common(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
+}
+
+void CLElementwiseOperationKernel::configure_common(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info()));
@@ -264,7 +269,7 @@ void CLElementwiseOperationKernel::configure_common(const ICLTensor *input1, con
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
ICLKernel::configure_internal(win_config.second);
@@ -329,10 +334,17 @@ BorderSize CLElementwiseOperationKernel::border_size() const
void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy,
const ActivationLayerInfo &act_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, policy, act_info);
+}
+
+void CLSaturatedArithmeticOperationKernel::configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
+ const ConvertPolicy &policy,
+ const ActivationLayerInfo &act_info)
+{
_policy = policy;
_op = op;
_act_info = act_info;
- configure_common(input1, input2, output);
+ configure_common(compile_context, input1, input2, output);
}
Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
@@ -381,9 +393,15 @@ std::string CLSaturatedArithmeticOperationKernel::name()
void CLArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, act_info);
+}
+
+void CLArithmeticOperationKernel::configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
+ const ActivationLayerInfo &act_info)
+{
_op = op;
_act_info = act_info;
- configure_common(input1, input2, output);
+ configure_common(compile_context, input1, input2, output);
}
Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
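
The elementwise kernels thread the context through one extra level: each concrete configure() overload records its operator-specific state and then delegates to the shared configure_common(), so the compile context only needs plumbing through the common path once. Sketched with a hypothetical derived kernel (the class and member names mirror the hunks above but are illustrative):

void CLMyArithmeticKernel::configure(CLCompileContext &compile_context, ArithmeticOperation op,
                                     const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
                                     const ActivationLayerInfo &act_info)
{
    _op       = op;       // per-operator state set before the shared path runs
    _act_info = act_info;
    configure_common(compile_context, input1, input2, output); // shared build + window logic
}
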
diff --git a/src/core/CL/kernels/CLErodeKernel.cpp b/src/core/CL/kernels/CLErodeKernel.cpp
index e56b71a75e..e959d1c320 100644
--- a/src/core/CL/kernels/CLErodeKernel.cpp
+++ b/src/core/CL/kernels/CLErodeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,11 +38,16 @@ BorderSize CLErodeKernel::border_size() const
void CLErodeKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLErodeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("erode"));
+ _kernel = create_kernel(compile_context, "erode");
_input = input;
_output = output;
diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
index 822f155b85..5542ad72f4 100644
--- a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
+++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
@@ -75,6 +75,11 @@ CLFFTDigitReverseKernel::CLFFTDigitReverseKernel()
void CLFFTDigitReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, idx, config);
+}
+
+void CLFFTDigitReverseKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, idx);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), idx->info(), config));
@@ -87,7 +92,7 @@ void CLFFTDigitReverseKernel::configure(const ICLTensor *input, ICLTensor *outpu
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(input->info()->num_channels()));
build_opts.add_option_if(config.conjugate, "-DCONJ");
std::string kernel_name = "fft_digit_reverse_axis_" + support::cpp11::to_string(config.axis);
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info(), idx->info(), config);
diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
index 73df5b226c..6e7e1eff06 100644
--- a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
+++ b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
@@ -85,6 +85,11 @@ CLFFTRadixStageKernel::CLFFTRadixStageKernel()
void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, config);
+}
+
+void CLFFTRadixStageKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, config));
@@ -101,7 +106,7 @@ void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const
kernel_name += "_radix_" + support::cpp11::to_string(config.radix);
kernel_name += (config.is_first_stage) ? "_first_stage" : "";
kernel_name += "_axis_" + support::cpp11::to_string(config.axis);
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set static arguments if not the first stage
if(!config.is_first_stage)
diff --git a/src/core/CL/kernels/CLFFTScaleKernel.cpp b/src/core/CL/kernels/CLFFTScaleKernel.cpp
index 312c73746d..32e652ad1d 100644
--- a/src/core/CL/kernels/CLFFTScaleKernel.cpp
+++ b/src/core/CL/kernels/CLFFTScaleKernel.cpp
@@ -78,6 +78,11 @@ CLFFTScaleKernel::CLFFTScaleKernel()
void CLFFTScaleKernel::configure(ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, config);
+}
+
+void CLFFTScaleKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr));
@@ -91,7 +96,7 @@ void CLFFTScaleKernel::configure(ICLTensor *input, ICLTensor *output, const FFTS
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(output != nullptr ? output->info()->num_channels() : input->info()->num_channels()));
build_opts.add_option_if(config.conjugate, "-DCONJ");
std::string kernel_name = "fft_scale_conj";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set static arguments
unsigned int idx = (1 + (_run_in_place ? 0 : 1)) * num_arguments_per_3D_tensor(); // Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLFastCornersKernel.cpp b/src/core/CL/kernels/CLFastCornersKernel.cpp
index 79ab3b7197..2bd4d89bbc 100644
--- a/src/core/CL/kernels/CLFastCornersKernel.cpp
+++ b/src/core/CL/kernels/CLFastCornersKernel.cpp
@@ -50,6 +50,11 @@ BorderSize CLFastCornersKernel::border_size() const
void CLFastCornersKernel::configure(const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, threshold, non_max_suppression, border_mode);
+}
+
+void CLFastCornersKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode)
+{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
@@ -69,7 +74,7 @@ void CLFastCornersKernel::configure(const ICLImage *input, ICLImage *output, flo
// Create kernel
const std::string kernel_name = std::string("fast_corners");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Set static kernel arguments
unsigned int idx = 2 * num_arguments_per_2D_tensor(); // Skip the input and output parameters
@@ -133,6 +138,11 @@ CLCopyToArrayKernel::CLCopyToArrayKernel()
void CLCopyToArrayKernel::configure(const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, update_number, corners, num_buffers);
+}
+
+void CLCopyToArrayKernel::configure(CLCompileContext &compile_context, const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers)
+{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(corners == nullptr);
@@ -151,7 +161,7 @@ void CLCopyToArrayKernel::configure(const ICLImage *input, bool update_number, I
// Create kernel
const std::string kernel_name = std::string("copy_to_keypoint");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Get how many pixels were skipped in the x dimension in the previous stages
unsigned int offset = _input->info()->valid_region().anchor.x();
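
From the caller's side, the point of the new overloads is that a single compile context can be obtained once and reused when configuring several kernels, rather than each kernel reaching into the CLKernelLibrary singleton on its own. A hypothetical caller for the two kernels in this file, using the configure() signatures shown above (the threshold value and the tensor, array and buffer objects are assumed to be declared and allocated elsewhere):

// Hypothetical usage of the new interface; 'input'/'output' are ICLImage*,
// 'corners' is an ICLKeyPointArray* and 'num_buffers' a cl::Buffer*.
CLCompileContext &ctx = CLKernelLibrary::get().get_compile_context();
CLFastCornersKernel fast_corners;
CLCopyToArrayKernel copy_to_array;
fast_corners.configure(ctx, input, output, /* threshold */ 20.f,
                       /* non_max_suppression */ true, BorderMode::UNDEFINED);
copy_to_array.configure(ctx, output, /* update_number */ true, corners, num_buffers);
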
diff --git a/src/core/CL/kernels/CLFillBorderKernel.cpp b/src/core/CL/kernels/CLFillBorderKernel.cpp
index c7a83fc2a4..c69a8c9f92 100644
--- a/src/core/CL/kernels/CLFillBorderKernel.cpp
+++ b/src/core/CL/kernels/CLFillBorderKernel.cpp
@@ -62,6 +62,11 @@ void CLFillBorderKernel::set_constant_border(unsigned int idx, const PixelValue
void CLFillBorderKernel::configure(ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
{
+ configure(CLKernelLibrary::get().get_compile_context(), tensor, border_size, border_mode, constant_border_value);
+}
+
+void CLFillBorderKernel::configure(CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
+{
ARM_COMPUTE_ERROR_ON(tensor == nullptr);
ARM_COMPUTE_ERROR_ON(tensor->info()->num_channels() != 1);
@@ -87,7 +92,7 @@ void CLFillBorderKernel::configure(ICLTensor *tensor, BorderSize border_size, Bo
build_opts.add_option("-DBORDER_SIZE_RIGHT=" + support::cpp11::to_string(border_size.right));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
_tensor = tensor;
// Create static kernel arguments
diff --git a/src/core/CL/kernels/CLFlattenLayerKernel.cpp b/src/core/CL/kernels/CLFlattenLayerKernel.cpp
index c078b0d14a..c2dc933f5a 100644
--- a/src/core/CL/kernels/CLFlattenLayerKernel.cpp
+++ b/src/core/CL/kernels/CLFlattenLayerKernel.cpp
@@ -82,6 +82,11 @@ CLFlattenLayerKernel::CLFlattenLayerKernel()
void CLFlattenLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLFlattenLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
@@ -101,7 +106,7 @@ void CLFlattenLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
build_opts.add_option_if(output->info()->num_dimensions() > 2, "-DDST_DIM1=" + support::cpp11::to_string(output->info()->dimension(1)));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("flatten", build_opts.options()));
+ _kernel = create_kernel(compile_context, "flatten", build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = "flatten";
diff --git a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
index 1e1c3e0eef..6f4ba0ed06 100644
--- a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
+++ b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
@@ -109,6 +109,14 @@ void CLFuseBatchNormalizationKernel::configure(const ICLTensor *input_weights, c
const ICLTensor *input_bias, const ICLTensor *bn_beta, const ICLTensor *bn_gamma,
float epsilon, FuseBatchNormalizationType fbn_type)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_beta, bn_gamma, epsilon, fbn_type);
+}
+
+void CLFuseBatchNormalizationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var,
+ ICLTensor *fused_weights, ICLTensor *fused_bias,
+ const ICLTensor *input_bias, const ICLTensor *bn_beta, const ICLTensor *bn_gamma,
+ float epsilon, FuseBatchNormalizationType fbn_type)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input_weights, bn_mean, bn_var);
_input_weights = input_weights;
@@ -162,7 +170,7 @@ void CLFuseBatchNormalizationKernel::configure(const ICLTensor *input_weights, c
build_opts.add_option_if(bn_gamma != nullptr, "-DGAMMA");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("fuse_batchnormalization_layer", build_opts.options()));
+ _kernel = create_kernel(compile_context, "fuse_batchnormalization_layer", build_opts.options());
}
Status CLFuseBatchNormalizationKernel::validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensorInfo *bn_var,
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
index d13884f267..0d4bbba0d4 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
@@ -172,6 +172,11 @@ CLGEMMLowpMatrixMultiplyKernel::CLGEMMLowpMatrixMultiplyKernel()
void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMReshapeInfo &gemm_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, gemm_info);
+}
+
+void CLGEMMLowpMatrixMultiplyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMReshapeInfo &gemm_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), gemm_info));
@@ -218,7 +223,7 @@ void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const IC
kernel_name = "gemmlowp_mm_midgard";
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
index 82ba824848..bcf71565af 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
@@ -173,6 +173,13 @@ CLGEMMLowpMatrixMultiplyNativeKernel::CLGEMMLowpMatrixMultiplyNativeKernel()
void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
const GEMMReshapeInfo &gemm_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, lhs_info, rhs_info, gemm_info);
+}
+
+void CLGEMMLowpMatrixMultiplyNativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info,
+ const GEMMReshapeInfo &gemm_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info));
@@ -223,7 +230,7 @@ void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, co
std::string kernel_name("gemmlowp_mm_native");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
index dd1f221a1f..ebb00a45d5 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
@@ -168,6 +168,13 @@ CLGEMMLowpMatrixMultiplyReshapedKernel::CLGEMMLowpMatrixMultiplyReshapedKernel()
void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
const GEMMReshapeInfo &gemm_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, lhs_info, rhs_info, gemm_info);
+}
+
+void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info,
+ const GEMMReshapeInfo &gemm_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info));
@@ -214,7 +221,7 @@ void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0,
kernel_name += rhs_info.transpose ? "rhs_t" : "rhs_nt";
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
index cef5b34de0..dd4c55c2d8 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -310,6 +310,13 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, gemm_info, vector_sum_col, vector_sum_row, bias, output_multipliers, output_shifts);
+}
+
+void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info,
+ const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias,
+ const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(),
input1->info(),
@@ -420,7 +427,7 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
index 49905fe3e0..fd2cc7a680 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
@@ -145,6 +145,13 @@ CLGEMMLowpOffsetContributionKernel::CLGEMMLowpOffsetContributionKernel()
void CLGEMMLowpOffsetContributionKernel::configure(ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset,
int32_t b_offset)
{
+ configure(CLKernelLibrary::get().get_compile_context(), mm_result, vector_sum_col, vector_sum_row, bias, k, a_offset, b_offset);
+}
+
+void CLGEMMLowpOffsetContributionKernel::configure(CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias,
+ int32_t k, int32_t a_offset,
+ int32_t b_offset)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
@@ -182,7 +189,7 @@ void CLGEMMLowpOffsetContributionKernel::configure(ICLTensor *mm_result, const I
std::string kernel_name("gemmlowp_offset_contribution");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(mm_result->info(),
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
index bae0a134ea..d52fb21574 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
@@ -184,6 +184,14 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *m
int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
+ configure(CLKernelLibrary::get().get_compile_context(), mm_result, vector_sum_col, vector_sum_row, bias, output, k, a_offset, b_offset, output_stage, output_multipliers, output_shifts);
+}
+
+void CLGEMMLowpOffsetContributionOutputStageKernel::configure(CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row,
+ const ICLTensor *bias, ICLTensor *output,
+ int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
+ const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output, output_multipliers, output_shifts);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
@@ -242,7 +250,7 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *m
kernel_name += "_" + string_from_gemmlowp_output_stage(output_stage.type);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(mm_result->info(),
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
index 5a554f3111..171dc48112 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
@@ -117,6 +117,12 @@ Status CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::validate(const ITensorInfo
void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
const GEMMLowpOutputStageInfo *info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, info);
+}
+
+void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
+ const GEMMLowpOutputStageInfo *info)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
@@ -138,7 +144,7 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const ICLTensor *i
build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_output_stage_quantize_down_float", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gemmlowp_output_stage_quantize_down_float", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info->output_data_type);
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
index 002af6b471..ca85e8b655 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
@@ -107,6 +107,11 @@ Status CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *input
void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, output_stage);
+}
+
+void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -135,7 +140,7 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const ICLTensor *input, c
build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_output_stage_quantize_down", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gemmlowp_output_stage_quantize_down", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), output_stage->output_data_type);
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp
index 22300b952d..00cef56db7 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp
@@ -123,6 +123,13 @@ void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(const
int result_fixedpoint_multiplier, int result_shift,
int min, int max)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, min, max);
+}
+
+void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
+ int result_fixedpoint_multiplier, int result_shift,
+ int min, int max)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(),
@@ -141,7 +148,7 @@ void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(const
build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info());
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp
index 9e0570a5e1..b6d98e6749 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp
@@ -118,6 +118,13 @@ void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure(const I
int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
int min, int max)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
+}
+
+void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
+ int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
+ int min, int max)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), min, max));
@@ -140,7 +147,7 @@ void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure(const I
build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_output_stage_quantize_down_fixedpoint", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gemmlowp_output_stage_quantize_down_fixedpoint", build_opts.options());
ICLKernel::configure_internal(win_config.second);
}
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index 6ab3272bd7..7f2f2e75a9 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -118,6 +118,13 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(const
int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
int min, int max)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
+}
+
+void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
+ int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
+ int min, int max)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), min, max));
@@ -140,7 +147,7 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(const
build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_output_stage_quantize_down_fixedpoint", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gemmlowp_output_stage_quantize_down_fixedpoint", build_opts.options());
ICLKernel::configure_internal(win_config.second);
}
diff --git a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
index 832e6281f4..e81ab2ffba 100644
--- a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
@@ -85,6 +85,11 @@ ICLGEMMLowpReductionKernel::ICLGEMMLowpReductionKernel()
void CLGEMMLowpMatrixAReductionKernel::configure(const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), mtx_a, vector_sum_row, info);
+}
+
+void CLGEMMLowpMatrixAReductionKernel::configure(CLCompileContext &compile_context, const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(mtx_a, vector_sum_row);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_matrix_a_reduction(mtx_a->info(), vector_sum_row->info()));
@@ -104,7 +109,7 @@ void CLGEMMLowpMatrixAReductionKernel::configure(const ICLTensor *mtx_a, ICLTens
std::string kernel_name = "gemmlowp_matrix_a_reduction" + std::string(is_dot8_supported ? "_dot8" : "");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
// This kernel does not need padding
@@ -154,6 +159,11 @@ void CLGEMMLowpMatrixAReductionKernel::run(const Window &window, cl::CommandQueu
void CLGEMMLowpMatrixBReductionKernel::configure(const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), mtx_b, vector_sum_col, info);
+}
+
+void CLGEMMLowpMatrixBReductionKernel::configure(CLCompileContext &compile_context, const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(mtx_b, vector_sum_col);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_matrix_b_reduction(mtx_b->info(), vector_sum_col->info()));
@@ -169,7 +179,7 @@ void CLGEMMLowpMatrixBReductionKernel::configure(const ICLTensor *mtx_b, ICLTens
build_opts.add_option_if(info.mul_by_scalar, "-DSCALAR=" + support::cpp11::to_string(info.scalar));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_matrix_b_reduction", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gemmlowp_matrix_b_reduction", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window_matrix_b_reduction(_input->info(), _output->info());
diff --git a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
index 806afb4e1a..045ae282d6 100644
--- a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
@@ -79,6 +79,11 @@ CLGEMMMatrixAccumulateBiasesKernel::CLGEMMMatrixAccumulateBiasesKernel()
void CLGEMMMatrixAccumulateBiasesKernel::configure(ICLTensor *accum, const ICLTensor *biases)
{
+ configure(CLKernelLibrary::get().get_compile_context(), accum, biases);
+}
+
+void CLGEMMMatrixAccumulateBiasesKernel::configure(CLCompileContext &compile_context, ICLTensor *accum, const ICLTensor *biases)
+{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(accum, biases);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(accum->info(), biases->info()));
@@ -101,7 +106,7 @@ void CLGEMMMatrixAccumulateBiasesKernel::configure(ICLTensor *accum, const ICLTe
build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_accumulate_biases", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gemm_accumulate_biases", build_opts.options());
}
Status CLGEMMMatrixAccumulateBiasesKernel::validate(const ITensorInfo *accum, const ITensorInfo *biases, GPUTarget gpu_target)
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index 8e764033e0..9587a042be 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -307,6 +307,12 @@ CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel()
void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision, activation_info);
+}
+
+void CLGEMMMatrixMultiplyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
+ bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
// Perform validate step
@@ -430,7 +436,7 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = "gemm_";
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
index d150391b00..af4b097c72 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
@@ -214,6 +214,14 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, const
const GEMMLHSMatrixInfo &lhs_info,
const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info);
+}
+
+void CLGEMMMatrixMultiplyNativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha,
+ float beta,
+ const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
@@ -281,7 +289,7 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, const
std::string kernel_name("gemm_mm_native");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
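
In the GEMM kernels the name string does double duty: it selects the OpenCL program to build and seeds _config_id, which the comment above ties to LWS tuning. Condensed from the gemm_mm_native hunk, with build_opts assumed to be assembled earlier in the function as in the full source:

std::string kernel_name("gemm_mm_native");

// One string, two jobs: program lookup and the key for local-work-size tuning
_kernel    = create_kernel(compile_context, kernel_name, build_opts.options());
_config_id = kernel_name;
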
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
index 3f154a5790..eb01486087 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
@@ -216,6 +216,14 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, cons
const GEMMLHSMatrixInfo &lhs_info,
const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info);
+}
+
+void CLGEMMMatrixMultiplyReshapedKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha,
+ float beta,
+ const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
@@ -277,7 +285,7 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, cons
kernel_name += rhs_info.transpose ? "rhs_t" : "rhs_nt";
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
index ed66997a03..011e93d9b3 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -217,6 +217,14 @@ void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input
const GEMMLHSMatrixInfo &lhs_info,
const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info);
+}
+
+void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha,
+ float beta,
+ const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
@@ -287,7 +295,7 @@ void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input
kernel_name += rhs_info.transpose ? "t" : "nt";
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
diff --git a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
index 03889bd55f..98a1dee758 100644
--- a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
@@ -83,6 +83,11 @@ BorderSize CLGEMMMatrixVectorMultiplyKernel::border_size() const
void CLGEMMMatrixVectorMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output);
+}
+
+void CLGEMMMatrixVectorMultiplyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info()));
@@ -100,7 +105,7 @@ void CLGEMMMatrixVectorMultiplyKernel::configure(const ICLTensor *input0, const
build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input0->info()->dimension(1)));
std::string kernel_name = is_quantized ? std::string("gemm_mv_quantized") : std::string("gemm_mv");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Add static arguments
if(is_quantized)
diff --git a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
index 6f92522cc0..73e3106ff8 100644
--- a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
@@ -121,6 +121,11 @@ CLGEMMReshapeLHSMatrixKernel::CLGEMMReshapeLHSMatrixKernel()
void CLGEMMReshapeLHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, lhs_info, reinterpret_input_as_3d);
+}
+
+void CLGEMMReshapeLHSMatrixKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Perform validate step
@@ -146,7 +151,7 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *
kernel_name += lhs_info.transpose ? "t" : "nt";
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info(), lhs_info, reinterpret_input_as_3d);
diff --git a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
index 766b3e69dd..1623b1e552 100644
--- a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
@@ -102,6 +102,11 @@ CLGEMMReshapeRHSMatrixKernel::CLGEMMReshapeRHSMatrixKernel()
void CLGEMMReshapeRHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, rhs_info);
+}
+
+void CLGEMMReshapeRHSMatrixKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Perform validate step
@@ -124,7 +129,7 @@ void CLGEMMReshapeRHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *
kernel_name += rhs_info.transpose ? "t" : "nt";
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info(), rhs_info);
diff --git a/src/core/CL/kernels/CLGatherKernel.cpp b/src/core/CL/kernels/CLGatherKernel.cpp
index ae77945852..6bee66ab93 100644
--- a/src/core/CL/kernels/CLGatherKernel.cpp
+++ b/src/core/CL/kernels/CLGatherKernel.cpp
@@ -89,6 +89,11 @@ CLGatherKernel::CLGatherKernel()
void CLGatherKernel::configure(const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, indices, output, axis);
+}
+
+void CLGatherKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, indices);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), indices->info(), output->info(), axis));
@@ -109,7 +114,7 @@ void CLGatherKernel::configure(const ICLTensor *input, const ICLTensor *indices,
build_opts.add_option("-DAXIS=" + support::cpp11::to_string(_axis));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gather", build_opts.options()));
+ _kernel = create_kernel(compile_context, "gather", build_opts.options());
ICLKernel::configure_internal(win_config.second);
}
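
From the caller's side nothing breaks: old call sites pick up the default context internally, while new code can name the context the program is built against. A hypothetical usage sketch for the gather kernel above, assuming input, indices and output are already-allocated CLTensor objects:

// Either: legacy interface, default compile context resolved internally
CLGatherKernel gather_legacy;
gather_legacy.configure(&input, &indices, &output, 0 /* axis */);

// Or: new interface, the caller chooses which context builds the program
CLCompileContext &ctx = CLKernelLibrary::get().get_compile_context();
CLGatherKernel gather_explicit;
gather_explicit.configure(ctx, &input, &indices, &output, 0 /* axis */);
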
diff --git a/src/core/CL/kernels/CLGaussian3x3Kernel.cpp b/src/core/CL/kernels/CLGaussian3x3Kernel.cpp
index 7e8f3139f2..0edf46b506 100644
--- a/src/core/CL/kernels/CLGaussian3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLGaussian3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,6 +41,11 @@ BorderSize CLGaussian3x3Kernel::border_size() const
void CLGaussian3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLGaussian3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -55,7 +60,7 @@ void CLGaussian3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, b
};
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution3x3_static", build_opts));
+ _kernel = create_kernel(compile_context, "convolution3x3_static", build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLGaussian5x5Kernel.cpp b/src/core/CL/kernels/CLGaussian5x5Kernel.cpp
index 3b45b07ed9..98436b950f 100644
--- a/src/core/CL/kernels/CLGaussian5x5Kernel.cpp
+++ b/src/core/CL/kernels/CLGaussian5x5Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,17 +29,27 @@ using namespace arm_compute;
void CLGaussian5x5HorKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLGaussian5x5HorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
const std::array<int16_t, 5> matrix = { 1, 4, 6, 4, 1 };
// Set arguments
- CLSeparableConvolution5x5HorKernel::configure(input, output, matrix.data(), border_undefined);
+ CLSeparableConvolution5x5HorKernel::configure(compile_context, input, output, matrix.data(), border_undefined);
}
void CLGaussian5x5VertKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLGaussian5x5VertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
const uint32_t scale = 256;
const std::array<int16_t, 5> matrix = { 1, 4, 6, 4, 1 };
// Set arguments
- CLSeparableConvolution5x5VertKernel::configure(input, output, matrix.data(), scale, border_undefined);
+ CLSeparableConvolution5x5VertKernel::configure(compile_context, input, output, matrix.data(), scale, border_undefined);
}
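
Composite kernels such as the Gaussian 5x5 pair above never call create_kernel() themselves; the change is simply to thread the context through to the base class configure(). The same shape, sketched with hypothetical CLDerivedKernel/CLBaseKernel names:

void CLDerivedKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
    const std::array<int16_t, 5> matrix = { 1, 4, 6, 4, 1 };

    // No kernel creation here: the base kernel compiles with the forwarded context
    CLBaseKernel::configure(compile_context, input, output, matrix.data(), border_undefined);
}
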
diff --git a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
index fd21bb69aa..8486d45e1a 100644
--- a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
+++ b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
@@ -44,6 +44,11 @@ BorderSize CLGaussianPyramidHorKernel::border_size() const
void CLGaussianPyramidHorKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLGaussianPyramidHorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16);
ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1));
@@ -58,7 +63,7 @@ void CLGaussianPyramidHorKernel::configure(const ICLTensor *input, ICLTensor *ou
// Create kernel
const std::string kernel_name = std::string("gaussian1x5_sub_x");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
@@ -150,6 +155,11 @@ BorderSize CLGaussianPyramidVertKernel::border_size() const
void CLGaussianPyramidVertKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLGaussianPyramidVertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0));
@@ -164,7 +174,7 @@ void CLGaussianPyramidVertKernel::configure(const ICLTensor *input, ICLTensor *o
// Create kernel
const std::string kernel_name = std::string("gaussian5x1_sub_y");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gaussian5x1_sub_y"));
+ _kernel = create_kernel(compile_context, "gaussian5x1_sub_y");
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
index 96e2213bb9..0f09152757 100644
--- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
+++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
@@ -73,6 +73,11 @@ CLComputeAllAnchorsKernel::CLComputeAllAnchorsKernel()
void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), anchors, all_anchors, info);
+}
+
+void CLComputeAllAnchorsKernel::configure(CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info));
@@ -110,7 +115,7 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *a
// Create kernel
const std::string kernel_name = (is_quantized) ? "generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// The tensor all_anchors can be interpreted as an array of structs (each structs has values_per_roi fields).
// This means we don't need to pad on the X dimension, as we know in advance how many fields
diff --git a/src/core/CL/kernels/CLHOGDescriptorKernel.cpp b/src/core/CL/kernels/CLHOGDescriptorKernel.cpp
index c251dec066..f79388e93d 100644
--- a/src/core/CL/kernels/CLHOGDescriptorKernel.cpp
+++ b/src/core/CL/kernels/CLHOGDescriptorKernel.cpp
@@ -48,6 +48,11 @@ CLHOGOrientationBinningKernel::CLHOGOrientationBinningKernel()
void CLHOGOrientationBinningKernel::configure(const ICLTensor *input_magnitude, const ICLTensor *input_phase, ICLTensor *output, const HOGInfo *hog_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input_magnitude, input_phase, output, hog_info);
+}
+
+void CLHOGOrientationBinningKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_magnitude, const ICLTensor *input_phase, ICLTensor *output, const HOGInfo *hog_info)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_magnitude, 1, DataType::S16);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_phase, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(hog_info == nullptr);
@@ -75,7 +80,7 @@ void CLHOGOrientationBinningKernel::configure(const ICLTensor *input_magnitude,
// Create kernel
const std::string kernel_name = std::string("hog_orientation_binning");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
constexpr unsigned int num_elems_processed_per_iteration = 1;
constexpr unsigned int num_elems_read_per_iteration = 1;
@@ -139,6 +144,11 @@ CLHOGBlockNormalizationKernel::CLHOGBlockNormalizationKernel()
void CLHOGBlockNormalizationKernel::configure(const ICLTensor *input, ICLTensor *output, const HOGInfo *hog_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, hog_info);
+}
+
+void CLHOGBlockNormalizationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const HOGInfo *hog_info)
+{
ARM_COMPUTE_ERROR_ON(hog_info == nullptr);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, hog_info->num_bins(), DataType::F32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(output, DataType::F32);
@@ -172,7 +182,7 @@ void CLHOGBlockNormalizationKernel::configure(const ICLTensor *input, ICLTensor
build_opts.insert(args_str.str());
const std::string kernel_name = std::string("hog_block_normalization");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
constexpr unsigned int num_elems_processed_per_iteration = 1;
constexpr unsigned int num_elems_read_per_iteration = 1;
diff --git a/src/core/CL/kernels/CLHOGDetectorKernel.cpp b/src/core/CL/kernels/CLHOGDetectorKernel.cpp
index c003df4fd2..02fad20a05 100644
--- a/src/core/CL/kernels/CLHOGDetectorKernel.cpp
+++ b/src/core/CL/kernels/CLHOGDetectorKernel.cpp
@@ -45,6 +45,13 @@ CLHOGDetectorKernel::CLHOGDetectorKernel()
void CLHOGDetectorKernel::configure(const ICLTensor *input, const ICLHOG *hog, ICLDetectionWindowArray *detection_windows, cl::Buffer *num_detection_windows, const Size2D &detection_window_stride,
float threshold, uint16_t idx_class)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, hog, detection_windows, num_detection_windows, detection_window_stride, threshold, idx_class);
+}
+
+void CLHOGDetectorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLHOG *hog, ICLDetectionWindowArray *detection_windows, cl::Buffer *num_detection_windows,
+ const Size2D &detection_window_stride,
+ float threshold, uint16_t idx_class)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F32);
ARM_COMPUTE_ERROR_ON(hog == nullptr);
ARM_COMPUTE_ERROR_ON(detection_windows == nullptr);
@@ -82,7 +89,7 @@ void CLHOGDetectorKernel::configure(const ICLTensor *input, const ICLHOG *hog, I
// Create kernel
const std::string kernel_name = std::string("hog_detector");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Set static kernel arguments
unsigned int idx = num_arguments_per_2D_tensor(); // Skip the input parameters
diff --git a/src/core/CL/kernels/CLHarrisCornersKernel.cpp b/src/core/CL/kernels/CLHarrisCornersKernel.cpp
index eb1ebf6c14..2c344c7160 100644
--- a/src/core/CL/kernels/CLHarrisCornersKernel.cpp
+++ b/src/core/CL/kernels/CLHarrisCornersKernel.cpp
@@ -56,6 +56,13 @@ void CLHarrisScoreKernel::configure(const ICLImage *input1, const ICLImage *inpu
int32_t block_size, float norm_factor, float strength_thresh, float sensitivity,
bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, block_size, norm_factor, strength_thresh, sensitivity, border_undefined);
+}
+
+void CLHarrisScoreKernel::configure(CLCompileContext &compile_context, const ICLImage *input1, const ICLImage *input2, ICLImage *output,
+ int32_t block_size, float norm_factor, float strength_thresh, float sensitivity,
+ bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input1);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input2);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
@@ -82,7 +89,7 @@ void CLHarrisScoreKernel::configure(const ICLImage *input1, const ICLImage *inpu
std::set<std::string> build_opts = { ("-DDATA_TYPE=" + get_cl_type_from_data_type(input1->info()->data_type())) };
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(harris_score_kernel_name.str(), build_opts));
+ _kernel = create_kernel(compile_context, harris_score_kernel_name.str(), build_opts);
// Set static kernel arguments
unsigned int idx = 3 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp
index a92382dad3..8d9e1b9f9d 100644
--- a/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp
@@ -91,6 +91,11 @@ Status CLHeightConcatenateLayerKernel::validate(const ITensorInfo *input, unsign
void CLHeightConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int height_offset, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, height_offset, output);
+}
+
+void CLHeightConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int height_offset, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), height_offset, output->info()));
@@ -119,7 +124,7 @@ void CLHeightConcatenateLayerKernel::configure(const ICLTensor *input, unsigned
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("concatenate_height", build_opts.options()));
+ _kernel = create_kernel(compile_context, "concatenate_height", build_opts.options());
// Configure kernel window
ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
diff --git a/src/core/CL/kernels/CLHistogramKernel.cpp b/src/core/CL/kernels/CLHistogramKernel.cpp
index adb998d2f5..5c44f6eec5 100644
--- a/src/core/CL/kernels/CLHistogramKernel.cpp
+++ b/src/core/CL/kernels/CLHistogramKernel.cpp
@@ -53,6 +53,11 @@ CLHistogramKernel::CLHistogramKernel()
void CLHistogramKernel::configure(const ICLImage *input, ICLDistribution1D *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLHistogramKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output)
+{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON(nullptr == output);
@@ -84,7 +89,7 @@ void CLHistogramKernel::configure(const ICLImage *input, ICLDistribution1D *outp
// Create kernel
bool is_fixed_size = (256 == num_bins) && (1 == window_size) && (0 == offset) && (256 == offrange);
const std::string kernel_name = is_fixed_size ? "hist_local_kernel_fixed" : "hist_local_kernel";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Set static kernel arguments
unsigned int idx = num_arguments_per_2D_tensor(); //Skip the input and output parameters
@@ -158,6 +163,11 @@ CLHistogramBorderKernel::CLHistogramBorderKernel()
void CLHistogramBorderKernel::configure(const ICLImage *input, ICLDistribution1D *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLHistogramBorderKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output)
+{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON(nullptr == output);
@@ -190,7 +200,7 @@ void CLHistogramBorderKernel::configure(const ICLImage *input, ICLDistribution1D
// Create kernel
bool is_fixed_size = (256 == num_bins) && (1 == window_size) && (0 == offset) && (256 == offrange);
const std::string kernel_name = is_fixed_size ? "hist_border_kernel_fixed" : "hist_border_kernel";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Set static kernel arguments
unsigned int idx = num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp
index 4023c14137..b24d2509d1 100644
--- a/src/core/CL/kernels/CLIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLIm2ColKernel.cpp
@@ -295,6 +295,13 @@ CLIm2ColKernel::CLIm2ColKernel()
void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
unsigned int num_groups)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, kernel_dims, conv_info, has_bias, dilation, num_groups);
+}
+
+void CLIm2ColKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias,
+ const Size2D &dilation,
+ unsigned int num_groups)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, num_groups));
@@ -311,7 +318,7 @@ void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const
Im2ColConfiguration im2col_config = configure_opencl_kernel(input->info(), kernel_dims, conv_info, has_bias, dilation, num_groups);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(im2col_config.kernel_name, im2col_config.build_options));
+ _kernel = create_kernel(compile_context, im2col_config.kernel_name, im2col_config.build_options);
_input = input;
_output = output;
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
index 30684a2816..62a0485eff 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
@@ -77,6 +77,11 @@ CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
void CLInstanceNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
+}
+
+void CLInstanceNormalizationLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
_input = input;
@@ -100,7 +105,7 @@ void CLInstanceNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *
build_opts.add_option_if(_input->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("instance_normalization", build_opts.options()));
+ _kernel = create_kernel(compile_context, "instance_normalization", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(_input->info(), _output->info());
diff --git a/src/core/CL/kernels/CLIntegralImageKernel.cpp b/src/core/CL/kernels/CLIntegralImageKernel.cpp
index 79aa820865..415531d85c 100644
--- a/src/core/CL/kernels/CLIntegralImageKernel.cpp
+++ b/src/core/CL/kernels/CLIntegralImageKernel.cpp
@@ -39,6 +39,11 @@ using namespace arm_compute;
void CLIntegralImageHorKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLIntegralImageHorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
@@ -47,7 +52,7 @@ void CLIntegralImageHorKernel::configure(const ICLTensor *input, ICLTensor *outp
// Create kernel
const std::string kernel_name = std::string("integral_horizontal");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Configure kernel window
const unsigned int num_elems_processed_per_iteration = input->info()->dimension(0);
@@ -85,13 +90,18 @@ CLIntegralImageVertKernel::CLIntegralImageVertKernel()
void CLIntegralImageVertKernel::configure(ICLTensor *in_out)
{
+ configure(CLKernelLibrary::get().get_compile_context(), in_out);
+}
+
+void CLIntegralImageVertKernel::configure(CLCompileContext &compile_context, ICLTensor *in_out)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(in_out, 1, DataType::U32);
_in_out = in_out;
// Create kernel
const std::string kernel_name = std::string("integral_vertical");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration_x = 8;
diff --git a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
index a0d7be043a..1817d15d3e 100644
--- a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
+++ b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
@@ -97,6 +97,11 @@ CLL2NormalizeLayerKernel::CLL2NormalizeLayerKernel()
void CLL2NormalizeLayerKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, sum, output, axis, epsilon);
+}
+
+void CLL2NormalizeLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), sum->info(), output->info(), axis, epsilon));
@@ -131,7 +136,7 @@ void CLL2NormalizeLayerKernel::configure(const ICLTensor *input, const ICLTensor
default:
ARM_COMPUTE_ERROR("Axis not supported");
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("l2_normalize_" + kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, "l2_normalize_" + kernel_name, build_opts);
// Set epsilon argument
if(input->info()->data_type() == DataType::F32)
diff --git a/src/core/CL/kernels/CLLKTrackerKernel.cpp b/src/core/CL/kernels/CLLKTrackerKernel.cpp
index 68a210c115..3a7c1b5b9e 100644
--- a/src/core/CL/kernels/CLLKTrackerKernel.cpp
+++ b/src/core/CL/kernels/CLLKTrackerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,6 +41,13 @@ using namespace arm_compute;
void CLLKTrackerInitKernel::configure(const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates,
ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale)
+{
+ configure(CLKernelLibrary::get().get_compile_context(), old_points, new_points_estimates, old_points_internal, new_points_internal, use_initial_estimate, level, num_levels, pyramid_scale);
+}
+
+void CLLKTrackerInitKernel::configure(CLCompileContext &compile_context, const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates,
+ ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
+ bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale)
{
ARM_COMPUTE_ERROR_ON(old_points == nullptr);
@@ -55,7 +62,7 @@ void CLLKTrackerInitKernel::configure(const ICLKeyPointArray *old_points, const
{
kernel_name += (use_initial_estimate) ? std::string("_max_initial_estimate") : std::string("_max");
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Set static kernel arguments
unsigned int idx = 0;
@@ -87,13 +94,18 @@ void CLLKTrackerInitKernel::run(const Window &window, cl::CommandQueue &queue)
}
void CLLKTrackerFinalizeKernel::configure(ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points)
+{
+ configure(CLKernelLibrary::get().get_compile_context(), new_points_internal, new_points);
+}
+
+void CLLKTrackerFinalizeKernel::configure(CLCompileContext &compile_context, ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points)
{
ARM_COMPUTE_ERROR_ON(new_points_internal == nullptr);
ARM_COMPUTE_ERROR_ON(new_points == nullptr);
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("finalize"));
+ _kernel = create_kernel(compile_context, "finalize");
// Set static kernel arguments
unsigned int idx = 0;
@@ -124,6 +136,14 @@ void CLLKTrackerStage0Kernel::configure(const ICLTensor *old_input, const ICLTen
ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
size_t window_dimension, size_t level)
+{
+ configure(CLKernelLibrary::get().get_compile_context(), old_input, old_scharr_gx, old_scharr_gy, old_points_internal, new_points_internal, coeff_table, old_ival, window_dimension, level);
+}
+
+void CLLKTrackerStage0Kernel::configure(CLCompileContext &compile_context, const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy,
+ ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal,
+ ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
+ size_t window_dimension, size_t level)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(old_input, 1, DataType::U8);
@@ -175,7 +195,7 @@ void CLLKTrackerStage0Kernel::configure(const ICLTensor *old_input, const ICLTen
};
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("lktracker_stage0"));
+ _kernel = create_kernel(compile_context, "lktracker_stage0");
// Set arguments
unsigned int idx = 3 * num_arguments_per_2D_tensor();
@@ -212,6 +232,13 @@ CLLKTrackerStage1Kernel::CLLKTrackerStage1Kernel()
void CLLKTrackerStage1Kernel::configure(const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival,
Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level)
+{
+ configure(CLKernelLibrary::get().get_compile_context(), new_input, new_points_internal, coeff_table, old_ival, termination, epsilon, num_iterations, window_dimension, level);
+}
+
+void CLLKTrackerStage1Kernel::configure(CLCompileContext &compile_context, const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table,
+ ICLOldValArray *old_ival,
+ Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(new_input, 1, DataType::U8);
@@ -257,7 +284,7 @@ void CLLKTrackerStage1Kernel::configure(const ICLTensor *new_input, ICLLKInterna
const int term_epsilon = (termination == Termination::TERM_CRITERIA_EPSILON || termination == Termination::TERM_CRITERIA_BOTH) ? 1 : 0;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("lktracker_stage1"));
+ _kernel = create_kernel(compile_context, "lktracker_stage1");
// Set static kernel arguments
unsigned int idx = num_arguments_per_2D_tensor();
diff --git a/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp
index ad2f3a4892..fb750583c0 100644
--- a/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -83,6 +83,11 @@ std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input0, IT
void CLLocallyConnectedMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output);
+}
+
+void CLLocallyConnectedMatrixMultiplyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info()));
@@ -108,7 +113,7 @@ void CLLocallyConnectedMatrixMultiplyKernel::configure(const ICLTensor *input0,
// Create kernel
std::string data_type_name = lower_string(string_from_data_type(input0->info()->data_type()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(("gemm_lc_vm_" + data_type_name), build_opts));
+ _kernel = create_kernel(compile_context, ("gemm_lc_vm_" + data_type_name), build_opts);
// Configure kernel window
auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info());
diff --git a/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp b/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp
index 68f793b6e2..2c28e030d2 100644
--- a/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp
+++ b/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp
@@ -47,6 +47,12 @@ CLMagnitudePhaseKernel::CLMagnitudePhaseKernel()
void CLMagnitudePhaseKernel::configure(const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase,
MagnitudeType mag_type, PhaseType phase_type)
{
+ configure(CLKernelLibrary::get().get_compile_context(), gx, gy, magnitude, phase, mag_type, phase_type);
+}
+
+void CLMagnitudePhaseKernel::configure(CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase,
+ MagnitudeType mag_type, PhaseType phase_type)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gx, 1, DataType::S16, DataType::S32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gy, 1, DataType::S16, DataType::S32);
ARM_COMPUTE_ERROR_ON((magnitude == nullptr) && (phase == nullptr));
@@ -118,7 +124,7 @@ void CLMagnitudePhaseKernel::configure(const ICLTensor *gx, const ICLTensor *gy,
// Create kernel
const std::string kernel_name = std::string("magnitude_phase");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 16;
diff --git a/src/core/CL/kernels/CLMeanStdDevKernel.cpp b/src/core/CL/kernels/CLMeanStdDevKernel.cpp
index 7bfd6d6e53..5a6630d5d4 100644
--- a/src/core/CL/kernels/CLMeanStdDevKernel.cpp
+++ b/src/core/CL/kernels/CLMeanStdDevKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,6 +65,11 @@ Status CLMeanStdDevKernel::validate(const ITensorInfo *input, float *mean, cl::B
void CLMeanStdDevKernel::configure(const ICLImage *input, float *mean, cl::Buffer *global_sum, float *stddev, cl::Buffer *global_sum_squared)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, mean, global_sum, stddev, global_sum_squared);
+}
+
+void CLMeanStdDevKernel::configure(CLCompileContext &compile_context, const ICLImage *input, float *mean, cl::Buffer *global_sum, float *stddev, cl::Buffer *global_sum_squared)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, global_sum);
ARM_COMPUTE_ERROR_ON(stddev && nullptr == global_sum_squared);
ARM_COMPUTE_ERROR_THROW_ON(CLMeanStdDevKernel::validate(input->info(), mean, global_sum, stddev, global_sum_squared));
@@ -83,7 +88,7 @@ void CLMeanStdDevKernel::configure(const ICLImage *input, float *mean, cl::Buffe
build_opts.insert("-DSTDDEV");
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("mean_stddev_accumulate", build_opts));
+ _kernel = create_kernel(compile_context, "mean_stddev_accumulate", build_opts);
// Set fixed arguments
unsigned int idx = num_arguments_per_2D_tensor(); //Skip the input parameters
diff --git a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp
index 72935cea6d..11ef86e8c3 100644
--- a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp
+++ b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp
@@ -85,6 +85,11 @@ CLMeanStdDevNormalizationKernel::CLMeanStdDevNormalizationKernel()
void CLMeanStdDevNormalizationKernel::configure(ICLTensor *input, ICLTensor *output, float epsilon)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, epsilon);
+}
+
+void CLMeanStdDevNormalizationKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float epsilon)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
_run_in_place = (output == nullptr) || (output == input);
@@ -105,7 +110,7 @@ void CLMeanStdDevNormalizationKernel::configure(ICLTensor *input, ICLTensor *out
build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("mean_stddev_normalization", build_opts.options()));
+ _kernel = create_kernel(compile_context, "mean_stddev_normalization", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), (_run_in_place) ? nullptr : output->info());
diff --git a/src/core/CL/kernels/CLMedian3x3Kernel.cpp b/src/core/CL/kernels/CLMedian3x3Kernel.cpp
index fca8d9b604..cfc9591584 100644
--- a/src/core/CL/kernels/CLMedian3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLMedian3x3Kernel.cpp
@@ -39,6 +39,11 @@ BorderSize CLMedian3x3Kernel::border_size() const
void CLMedian3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLMedian3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -47,7 +52,7 @@ void CLMedian3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, boo
// Create kernel
const std::string kernel_name = std::string("non_linear_filter_box3x3");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, { "-DMEDIAN" }));
+ _kernel = create_kernel(compile_context, kernel_name, { "-DMEDIAN" });
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLMemsetKernel.cpp b/src/core/CL/kernels/CLMemsetKernel.cpp
index b06ae7e118..9b37cb81fd 100644
--- a/src/core/CL/kernels/CLMemsetKernel.cpp
+++ b/src/core/CL/kernels/CLMemsetKernel.cpp
@@ -44,6 +44,13 @@ void CLMemsetKernel::configure(ICLTensor *tensor,
const PixelValue &constant_value,
Window *window)
{
+ configure(CLKernelLibrary::get().get_compile_context(), tensor, constant_value, window);
+}
+
+void CLMemsetKernel::configure(CLCompileContext &compile_context, ICLTensor *tensor,
+ const PixelValue &constant_value,
+ Window *window)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
ARM_COMPUTE_ERROR_THROW_ON(validate(tensor->info(), constant_value, window));
@@ -77,7 +84,7 @@ void CLMemsetKernel::configure(ICLTensor *tensor,
build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(constant_value, data_type));
build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
build_opts.add_option_if(multi_access_x && remainder_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("memset", build_opts.options()));
+ _kernel = create_kernel(compile_context, "memset", build_opts.options());
}
Status CLMemsetKernel::validate(const ITensorInfo *tensor, const PixelValue &constant_value, Window *window)
diff --git a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
index 622270acf5..c89bbcb320 100644
--- a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
@@ -88,6 +88,11 @@ CLMinMaxLayerKernel::CLMinMaxLayerKernel()
void CLMinMaxLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLMinMaxLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
@@ -100,7 +105,7 @@ void CLMinMaxLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
build_opts.emplace("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("minmax_layer", build_opts));
+ _kernel = create_kernel(compile_context, "minmax_layer", build_opts);
auto win_config = validate_and_configure_window(input->info(), output->info());
diff --git a/src/core/CL/kernels/CLMinMaxLocationKernel.cpp b/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
index 3cead37cd8..77c945bed1 100644
--- a/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
+++ b/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
@@ -62,6 +62,11 @@ CLMinMaxKernel::CLMinMaxKernel()
void CLMinMaxKernel::configure(const ICLImage *input, cl::Buffer *min_max)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, min_max);
+}
+
+void CLMinMaxKernel::configure(CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S16, DataType::F32);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON(min_max == nullptr);
@@ -109,7 +114,7 @@ void CLMinMaxKernel::configure(const ICLImage *input, cl::Buffer *min_max)
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("minmax", build_opts));
+ _kernel = create_kernel(compile_context, "minmax", build_opts);
// Set fixed arguments
unsigned int idx = num_arguments_per_2D_tensor(); //Skip the input and output parameters
@@ -169,6 +174,12 @@ CLMinMaxLocationKernel::CLMinMaxLocationKernel()
void CLMinMaxLocationKernel::configure(const ICLImage *input, cl::Buffer *min_max, cl::Buffer *min_max_count, ICLCoordinates2DArray *min_loc, ICLCoordinates2DArray *max_loc)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, min_max, min_max_count, min_loc, max_loc);
+}
+
+void CLMinMaxLocationKernel::configure(CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max, cl::Buffer *min_max_count, ICLCoordinates2DArray *min_loc,
+ ICLCoordinates2DArray *max_loc)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S16, DataType::F32);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON(min_max == nullptr);
@@ -189,7 +200,7 @@ void CLMinMaxLocationKernel::configure(const ICLImage *input, cl::Buffer *min_ma
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("minmaxloc", build_opts));
+ _kernel = create_kernel(compile_context, "minmaxloc", build_opts);
// Set static arguments
unsigned int idx = num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLNonLinearFilterKernel.cpp b/src/core/CL/kernels/CLNonLinearFilterKernel.cpp
index 5e419743d0..01b8733ab8 100644
--- a/src/core/CL/kernels/CLNonLinearFilterKernel.cpp
+++ b/src/core/CL/kernels/CLNonLinearFilterKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,6 +57,13 @@ void CLNonLinearFilterKernel::configure(const ICLTensor *input, ICLTensor *outpu
unsigned int mask_size, MatrixPattern pattern, const uint8_t *mask,
bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, function, mask_size, pattern, mask, border_undefined);
+}
+
+void CLNonLinearFilterKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NonLinearFilterFunction function,
+ unsigned int mask_size, MatrixPattern pattern, const uint8_t *mask,
+ bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(mask_size != 3 && mask_size != 5);
@@ -78,7 +85,7 @@ void CLNonLinearFilterKernel::configure(const ICLTensor *input, ICLTensor *outpu
ss << "non_linear_filter_" << pattern_name << mask_size << "x" << mask_size;
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(ss.str(), build_opts));
+ _kernel = create_kernel(compile_context, ss.str(), build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp b/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp
index 4e41f0df42..dd6aa1ea8f 100644
--- a/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,6 +43,11 @@ BorderSize CLNonMaximaSuppression3x3Kernel::border_size() const
void CLNonMaximaSuppression3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
+}
+
+void CLNonMaximaSuppression3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::F32);
@@ -51,7 +56,7 @@ void CLNonMaximaSuppression3x3Kernel::configure(const ICLTensor *input, ICLTenso
// Create kernel
std::set<std::string> build_opts = { ("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())) };
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("non_max_suppression", build_opts));
+ _kernel = create_kernel(compile_context, "non_max_suppression", build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
index 024d1de9b0..6284a6acb4 100644
--- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
@@ -107,6 +107,11 @@ BorderSize CLNormalizationLayerKernel::border_size() const
void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, norm_info);
+}
+
+void CLNormalizationLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Output tensor auto initialization if not yet initialized
@@ -156,7 +161,7 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *ou
kernel_name = "normalization_layer_in_map_nchw";
}
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info(), norm_info);
diff --git a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
index c713e1fffa..d46581e4dc 100644
--- a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
@@ -97,6 +97,11 @@ CLNormalizePlanarYUVLayerKernel::CLNormalizePlanarYUVLayerKernel()
void CLNormalizePlanarYUVLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, mean, std);
+}
+
+void CLNormalizePlanarYUVLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std)
+{
// Perform validation step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, mean, std);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), mean->info(), std->info()));
@@ -127,7 +132,7 @@ void CLNormalizePlanarYUVLayerKernel::configure(const ICLTensor *input, ICLTenso
// Create kernel
kernel_name += lower_string(string_from_data_layout(input->info()->data_layout()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info(), mean->info(), std->info());
diff --git a/src/core/CL/kernels/CLPadLayerKernel.cpp b/src/core/CL/kernels/CLPadLayerKernel.cpp
index 3eeef5583c..764e2a41e7 100644
--- a/src/core/CL/kernels/CLPadLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPadLayerKernel.cpp
@@ -98,6 +98,11 @@ CLPadLayerKernel::CLPadLayerKernel()
void CLPadLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, padding, constant_value, mode);
+}
+
+void CLPadLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
+{
// Perform validation step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, constant_value, mode));
@@ -189,7 +194,7 @@ void CLPadLayerKernel::configure(const ICLTensor *input, ICLTensor *output, cons
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
}
Status CLPadLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
diff --git a/src/core/CL/kernels/CLPermuteKernel.cpp b/src/core/CL/kernels/CLPermuteKernel.cpp
index 9a4d72db88..3f1f870802 100644
--- a/src/core/CL/kernels/CLPermuteKernel.cpp
+++ b/src/core/CL/kernels/CLPermuteKernel.cpp
@@ -77,6 +77,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
void CLPermuteKernel::configure(const ICLTensor *input, ICLTensor *output, const PermutationVector &perm)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, perm);
+}
+
+void CLPermuteKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PermutationVector &perm)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), perm));
@@ -98,7 +103,7 @@ void CLPermuteKernel::configure(const ICLTensor *input, ICLTensor *output, const
build_opts.add_option("-DP3=" + support::cpp11::to_string((_perm.num_dimensions() >= 3) ? perm[2] : 2));
build_opts.add_option("-DP4=" + support::cpp11::to_string((_perm.num_dimensions() >= 4) ? perm[3] : 3));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("permute", build_opts.options()));
+ _kernel = create_kernel(compile_context, "permute", build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
index 2df3ff4f34..49f5e04433 100644
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
+++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
@@ -145,6 +145,12 @@ CLPixelWiseMultiplicationKernel::CLPixelWiseMultiplicationKernel()
void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
+}
+
+void CLPixelWiseMultiplicationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(),
scale, overflow_policy, rounding_policy, act_info));
@@ -233,7 +239,7 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set scale argument
unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the inputs and output parameters
@@ -370,6 +376,11 @@ CLComplexPixelWiseMultiplicationKernel::CLComplexPixelWiseMultiplicationKernel()
void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info);
+}
+
+void CLComplexPixelWiseMultiplicationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info(), act_info));
@@ -390,7 +401,7 @@ void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1,
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("pixelwise_mul_complex", build_opts.options()));
+ _kernel = create_kernel(compile_context, "pixelwise_mul_complex", build_opts.options());
ICLKernel::configure_internal(win_config.second);
}
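
The practical gain is that one compile context can now be threaded explicitly through a whole pipeline of kernels, rather than each kernel reaching for the singleton internally. A sketch using two of the kernels above; the CLTensor variables are assumed to be allocated elsewhere, and the padding literal is purely illustrative:

CLCompileContext &ctx = CLKernelLibrary::get().get_compile_context();
CLPadLayerKernel pad_kernel;
CLPermuteKernel  permute_kernel;
// Both kernels compile against the same, explicitly chosen context.
pad_kernel.configure(ctx, &src, &padded, PaddingList{ { 1, 1 } }, PixelValue(), PaddingMode::CONSTANT);
permute_kernel.configure(ctx, &padded, &permuted, PermutationVector(2U, 0U, 1U));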
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index dbbca4771b..43b8f85c39 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -178,6 +178,11 @@ BorderSize CLPoolingLayerKernel::border_size() const
void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, pool_info, indices);
+}
+
+void CLPoolingLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Set instance variables
@@ -275,12 +280,12 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
std::string kernel_name = ((is_pool3x3_stride_le3) ? "pooling_layer_optimized_" : "pooling_layer_")
+ support::cpp11::to_string(pool_size_x);
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
}
else // Run general case
{
std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nchw" : "pooling_layer_MxN_nchw";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
}
break;
}
@@ -292,7 +297,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
build_opts.add_option_if(output->info()->tensor_shape().total_size_upper(3) > 1,
"-DDST_DEPTH=" + support::cpp11::to_string(output->info()->dimension(idx_height)));
std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nhwc" : "pooling_layer_MxN_nhwc";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
break;
}
default:
diff --git a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
index ed70f4ddb7..9f930c54c2 100644
--- a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
@@ -102,6 +102,12 @@ CLPriorBoxLayerKernel::CLPriorBoxLayerKernel()
void CLPriorBoxLayerKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min, cl::Buffer *max, cl::Buffer *aspect_ratios)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, info, min, max, aspect_ratios);
+}
+
+void CLPriorBoxLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min,
+ cl::Buffer *max, cl::Buffer *aspect_ratios)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
_input1 = input1;
@@ -170,7 +176,7 @@ void CLPriorBoxLayerKernel::configure(const ICLTensor *input1, const ICLTensor *
}
unsigned int idx = num_arguments_per_2D_tensor();
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("prior_box_layer_nchw", build_opts.options()));
+ _kernel = create_kernel(compile_context, "prior_box_layer_nchw", build_opts.options());
_kernel.setArg(idx++, *_min);
_kernel.setArg(idx++, *_max);
diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
index 2ec2bd1178..e017946673 100644
--- a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
@@ -81,6 +81,11 @@ CLQuantizationLayerKernel::CLQuantizationLayerKernel()
void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLQuantizationLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
@@ -154,7 +159,7 @@ void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *out
build_opts.add_option("-DMIN_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.first));
build_opts.add_option("-DMAX_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.second));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("quantization_layer", build_opts.options()));
+ _kernel = create_kernel(compile_context, "quantization_layer", build_opts.options());
}
Status CLQuantizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
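
The static validate()/configure() pairing is untouched by this change; only configure() grows the extra overload. A sketch of the usual idiom with the kernel above, assuming input and output are initialized ICLTensor pointers and that the conventional Status/ErrorCode check applies:

if(CLQuantizationLayerKernel::validate(input->info(), output->info()).error_code() == ErrorCode::OK)
{
    CLQuantizationLayerKernel quantization_kernel;
    // Explicitly pass the compile context; the argument-free path would
    // fetch the same default internally.
    quantization_kernel.configure(CLKernelLibrary::get().get_compile_context(), input, output);
}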
diff --git a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
index 41ed8ede7f..cc1af52342 100644
--- a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
+++ b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
@@ -105,6 +105,11 @@ CLROIAlignLayerKernel::CLROIAlignLayerKernel()
void CLROIAlignLayerKernel::configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, rois, output, pool_info);
+}
+
+void CLROIAlignLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, rois);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), rois->info(), output->info(), pool_info));
@@ -149,7 +154,7 @@ void CLROIAlignLayerKernel::configure(const ICLTensor *input, const ICLTensor *r
// Create kernel
const std::string kernel_name = (is_qasymm) ? "roi_align_layer_quantized" : "roi_align_layer";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
ICLKernel::configure_internal(win_config.second);
}
diff --git a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp
index dad33b1bf8..5f64215485 100644
--- a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp
@@ -73,6 +73,11 @@ CLROIPoolingLayerKernel::CLROIPoolingLayerKernel()
void CLROIPoolingLayerKernel::configure(const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, rois, output, pool_info);
+}
+
+void CLROIPoolingLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, rois, output);
//Validate arguments
@@ -115,7 +120,7 @@ void CLROIPoolingLayerKernel::configure(const ICLTensor *input, const ICLTensor
// Create kernel
std::string kernel_name = "roi_pooling_layer";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Set static kernel arguments
unsigned int idx = 2 * num_arguments_per_3D_tensor() + num_arguments_per_1D_array();
diff --git a/src/core/CL/kernels/CLRangeKernel.cpp b/src/core/CL/kernels/CLRangeKernel.cpp
index 4024de7f2b..97b5f01df4 100644
--- a/src/core/CL/kernels/CLRangeKernel.cpp
+++ b/src/core/CL/kernels/CLRangeKernel.cpp
@@ -93,6 +93,11 @@ CLRangeKernel::CLRangeKernel()
void CLRangeKernel::configure(ICLTensor *output, const float start, const float end, const float step)
{
+ configure(CLKernelLibrary::get().get_compile_context(), output, start, end, step);
+}
+
+void CLRangeKernel::configure(CLCompileContext &compile_context, ICLTensor *output, const float start, const float end, const float step)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*(output->info()), start, end, step));
@@ -123,7 +128,7 @@ void CLRangeKernel::configure(ICLTensor *output, const float start, const float
kernel_name += "_quantized";
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
ICLKernel::configure_internal(win_config.second);
// Set config_id for enabling LWS tuning
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index 7563f02ff3..5c760168ca 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -129,6 +129,11 @@ BorderSize CLReductionOperationKernel::border_size() const
void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op, width);
+}
+
+void CLReductionOperationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op, width));
@@ -227,7 +232,7 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
default:
ARM_COMPUTE_ERROR("Not supported");
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reduction_operation_" + kernel_axis_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, "reduction_operation_" + kernel_axis_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis, op);
diff --git a/src/core/CL/kernels/CLRemapKernel.cpp b/src/core/CL/kernels/CLRemapKernel.cpp
index 12161fcd70..fb425b512f 100644
--- a/src/core/CL/kernels/CLRemapKernel.cpp
+++ b/src/core/CL/kernels/CLRemapKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,6 +49,12 @@ BorderSize CLRemapKernel::border_size() const
void CLRemapKernel::configure(const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, map_x, map_y, output, policy, border_undefined);
+}
+
+void CLRemapKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy,
+ bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(map_x, 1, DataType::F32);
@@ -66,7 +72,7 @@ void CLRemapKernel::configure(const ICLTensor *input, const ICLTensor *map_x, co
std::string interpolation_name = string_from_interpolation_policy(policy);
std::transform(interpolation_name.begin(), interpolation_name.end(), interpolation_name.begin(), ::tolower);
std::string kernel_name = "remap_" + interpolation_name;
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure window
constexpr unsigned int num_elems_processed_per_iteration = 4;
diff --git a/src/core/CL/kernels/CLReorgLayerKernel.cpp b/src/core/CL/kernels/CLReorgLayerKernel.cpp
index 4aa50c1889..e36bcbbe34 100644
--- a/src/core/CL/kernels/CLReorgLayerKernel.cpp
+++ b/src/core/CL/kernels/CLReorgLayerKernel.cpp
@@ -72,6 +72,11 @@ CLReorgLayerKernel::CLReorgLayerKernel()
void CLReorgLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t stride)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, stride);
+}
+
+void CLReorgLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t stride)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), stride));
@@ -86,7 +91,7 @@ void CLReorgLayerKernel::configure(const ICLTensor *input, ICLTensor *output, in
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
build_opts.add_option("-DSRC_DEPTH=" + support::cpp11::to_string(input->info()->dimension(idx_channel)));
build_opts.add_option("-DSTRIDE=" + support::cpp11::to_string(stride));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure window
// auto initialize the output tensor if not yet initialized
diff --git a/src/core/CL/kernels/CLReshapeLayerKernel.cpp b/src/core/CL/kernels/CLReshapeLayerKernel.cpp
index a6053d97e3..33a1ceacc4 100644
--- a/src/core/CL/kernels/CLReshapeLayerKernel.cpp
+++ b/src/core/CL/kernels/CLReshapeLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,6 +64,11 @@ CLReshapeLayerKernel::CLReshapeLayerKernel()
void CLReshapeLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLReshapeLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
@@ -72,7 +77,7 @@ void CLReshapeLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
// Create kernel
std::set<std::string> build_opts = { "-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()) };
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reshape_layer", build_opts));
+ _kernel = create_kernel(compile_context, "reshape_layer", build_opts);
// Add static arguments
const cl_int2 input_shape =
diff --git a/src/core/CL/kernels/CLReverseKernel.cpp b/src/core/CL/kernels/CLReverseKernel.cpp
index 83708e268c..d88a78c029 100644
--- a/src/core/CL/kernels/CLReverseKernel.cpp
+++ b/src/core/CL/kernels/CLReverseKernel.cpp
@@ -65,6 +65,11 @@ CLReverseKernel::CLReverseKernel()
void CLReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *axis)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, axis);
+}
+
+void CLReverseKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, axis);
_input = input;
@@ -82,7 +87,7 @@ void CLReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reverse", build_opts.options()));
+ _kernel = create_kernel(compile_context, "reverse", build_opts.options());
// Set static kernel arguments
unsigned int idx = 2 * num_arguments_per_4D_tensor() + num_arguments_per_1D_tensor();
diff --git a/src/core/CL/kernels/CLScaleKernel.cpp b/src/core/CL/kernels/CLScaleKernel.cpp
index 244afb5e4c..33c7ad71c1 100644
--- a/src/core/CL/kernels/CLScaleKernel.cpp
+++ b/src/core/CL/kernels/CLScaleKernel.cpp
@@ -182,6 +182,12 @@ const ICLTensor *CLScaleKernel::output() const
void CLScaleKernel::configure(const ICLTensor *input, ICLTensor *output, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, bool align_corners)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, border_mode, sampling_policy, align_corners);
+}
+
+void CLScaleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
+ bool align_corners)
+{
_align_corners = policy == InterpolationPolicy::BILINEAR
&& sampling_policy == SamplingPolicy::TOP_LEFT
&& align_corners;
@@ -236,7 +242,7 @@ void CLScaleKernel::configure(const ICLTensor *input, ICLTensor *output, Interpo
std::string kernel_name = "scale_" + interpolation_name;
kernel_name += call_quantized_kernel ? "_quantized_" : "_";
kernel_name += lower_string(string_from_data_layout(_data_layout));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
unsigned int idx = is_nhwc ? 2 * num_arguments_per_4D_tensor() : 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLScharr3x3Kernel.cpp b/src/core/CL/kernels/CLScharr3x3Kernel.cpp
index 94b0d38c52..9f5bdb3cd8 100644
--- a/src/core/CL/kernels/CLScharr3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLScharr3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,6 +49,11 @@ BorderSize CLScharr3x3Kernel::border_size() const
void CLScharr3x3Kernel::configure(const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined);
+}
+
+void CLScharr3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
@@ -83,7 +88,7 @@ void CLScharr3x3Kernel::configure(const ICLTensor *input, ICLTensor *output_x, I
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("scharr3x3", build_opts));
+ _kernel = create_kernel(compile_context, "scharr3x3", build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLSelectKernel.cpp b/src/core/CL/kernels/CLSelectKernel.cpp
index 5ee5405465..866ec6bde2 100644
--- a/src/core/CL/kernels/CLSelectKernel.cpp
+++ b/src/core/CL/kernels/CLSelectKernel.cpp
@@ -104,6 +104,11 @@ CLSelectKernel::CLSelectKernel()
}
void CLSelectKernel::configure(const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), c, x, y, output);
+}
+
+void CLSelectKernel::configure(CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(c, x, y, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(c->info(), x->info(), y->info(), output->info()));
@@ -141,7 +146,7 @@ void CLSelectKernel::configure(const ICLTensor *c, const ICLTensor *x, const ICL
kernel_name += "_different_rank";
kernel_name += is_input_rank_greater_than_two ? "_n" : "_2";
}
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(c->info(), x->info(), y->info(), output->info());
diff --git a/src/core/CL/kernels/CLSobel3x3Kernel.cpp b/src/core/CL/kernels/CLSobel3x3Kernel.cpp
index 1186feb1b9..1c97c13d96 100644
--- a/src/core/CL/kernels/CLSobel3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLSobel3x3Kernel.cpp
@@ -50,6 +50,11 @@ BorderSize CLSobel3x3Kernel::border_size() const
void CLSobel3x3Kernel::configure(const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined);
+}
+
+void CLSobel3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
@@ -85,7 +90,7 @@ void CLSobel3x3Kernel::configure(const ICLTensor *input, ICLTensor *output_x, IC
// Create kernel
const std::string kernel_name = std::string("sobel3x3");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
diff --git a/src/core/CL/kernels/CLSobel5x5Kernel.cpp b/src/core/CL/kernels/CLSobel5x5Kernel.cpp
index cafdd9807a..597807796e 100644
--- a/src/core/CL/kernels/CLSobel5x5Kernel.cpp
+++ b/src/core/CL/kernels/CLSobel5x5Kernel.cpp
@@ -50,6 +50,11 @@ BorderSize CLSobel5x5HorKernel::border_size() const
void CLSobel5x5HorKernel::configure(const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined);
+}
+
+void CLSobel5x5HorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
@@ -86,7 +91,7 @@ void CLSobel5x5HorKernel::configure(const ICLTensor *input, ICLTensor *output_x,
// Create kernel
const std::string kernel_name = std::string("sobel_separable1x5");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
@@ -148,6 +153,11 @@ BorderSize CLSobel5x5VertKernel::border_size() const
void CLSobel5x5VertKernel::configure(const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input_x, input_y, output_x, output_y, border_undefined);
+}
+
+void CLSobel5x5VertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
_run_sobel_x = output_x != nullptr;
@@ -185,7 +195,7 @@ void CLSobel5x5VertKernel::configure(const ICLTensor *input_x, const ICLTensor *
// Create kernel
const std::string kernel_name = std::string("sobel_separable5x1");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
const ICLTensor *input = _run_sobel_x ? _input_x : _input_y;
diff --git a/src/core/CL/kernels/CLSobel7x7Kernel.cpp b/src/core/CL/kernels/CLSobel7x7Kernel.cpp
index 0c5bb58cb9..183ebce3ac 100644
--- a/src/core/CL/kernels/CLSobel7x7Kernel.cpp
+++ b/src/core/CL/kernels/CLSobel7x7Kernel.cpp
@@ -50,6 +50,11 @@ BorderSize CLSobel7x7HorKernel::border_size() const
void CLSobel7x7HorKernel::configure(const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined);
+}
+
+void CLSobel7x7HorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
@@ -88,7 +93,7 @@ void CLSobel7x7HorKernel::configure(const ICLTensor *input, ICLTensor *output_x,
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
@@ -150,6 +155,11 @@ BorderSize CLSobel7x7VertKernel::border_size() const
void CLSobel7x7VertKernel::configure(const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input_x, input_y, output_x, output_y, border_undefined);
+}
+
+void CLSobel7x7VertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+{
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
_run_sobel_x = output_x != nullptr;
@@ -187,7 +197,7 @@ void CLSobel7x7VertKernel::configure(const ICLTensor *input_x, const ICLTensor *
// Create kernel
const std::string kernel_name = std::string("sobel_separable7x1");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts);
const ICLTensor *input = _run_sobel_x ? _input_x : _input_y;
diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
index d7a1778e26..112d864827 100644
--- a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
@@ -220,6 +220,11 @@ CLLogits1DMaxShiftExpSumKernel::CLLogits1DMaxShiftExpSumKernel()
void CLLogits1DMaxShiftExpSumKernel::configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, max, output, sum, info);
+}
+
+void CLLogits1DMaxShiftExpSumKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, max, sum, output);
// Output auto initialization if not yet initialized
@@ -277,7 +282,7 @@ void CLLogits1DMaxShiftExpSumKernel::configure(const ICLTensor *input, ICLTensor
}
// Create kernel.
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set static arguments. Both the kernels use the same arguments
unsigned int idx = 4 * num_arguments_per_3D_tensor(); //Skip the input and output parameters
@@ -343,6 +348,11 @@ CLLogits1DNormKernel::CLLogits1DNormKernel()
void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, sum, output, info);
+}
+
+void CLLogits1DNormKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);
// Note: output should always have a scale of 1/256 and offset 0
@@ -376,7 +386,7 @@ void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *su
// Create kernel
std::string kernel_name = is_quantized_asymmetric ? "softmax_layer_norm_quantized" : "softmax_layer_norm";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure window
auto win_config = validate_and_configure_window_1DNorm(input->info(), output->info(), sum->info(), info);
diff --git a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
index 1b02eef040..520924e764 100644
--- a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
@@ -91,6 +91,11 @@ CLSpaceToBatchLayerKernel::CLSpaceToBatchLayerKernel()
void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, paddings, output);
+}
+
+void CLSpaceToBatchLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), block_shape->info(), paddings->info(), output->info()));
@@ -113,7 +118,7 @@ void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const ICLTenso
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
build_opts.add_option("-DHEIGHT_IN=" + support::cpp11::to_string(input->info()->dimension(idx_height)));
build_opts.add_option("-DBATCH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_batch)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("space_to_batch_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "space_to_batch_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
@@ -123,6 +128,13 @@ void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const ICLTenso
void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, padding_left, padding_right, output);
+}
+
+void CLSpaceToBatchLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left,
+ const Size2D &padding_right,
+ ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
TensorShape output_shape = misc::shape_calculator::compute_space_to_batch_shape(input->info(), block_shape_x, block_shape_y, padding_left, padding_right);
@@ -153,7 +165,7 @@ void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const int bloc
build_opts.add_option("-DPAD_RIGHT_X=" + support::cpp11::to_string(padding_right.x()));
build_opts.add_option("-DPAD_LEFT_Y=" + support::cpp11::to_string(padding_left.y()));
build_opts.add_option("-DPAD_RIGHT_Y=" + support::cpp11::to_string(padding_right.y()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("space_to_batch_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "space_to_batch_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
diff --git a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
index a7e3777bab..b4bd3b8fbe 100644
--- a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
@@ -68,6 +68,11 @@ CLSpaceToDepthLayerKernel::CLSpaceToDepthLayerKernel()
void CLSpaceToDepthLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t block_shape)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, block_shape);
+}
+
+void CLSpaceToDepthLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
TensorShape output_shape = compute_depth_to_space_shape(input->info(), block_shape);
@@ -88,7 +93,7 @@ void CLSpaceToDepthLayerKernel::configure(const ICLTensor *input, ICLTensor *out
build_opts.add_option("-DCHANNEL_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_channel)));
build_opts.add_option("-DBLOCK_SHAPE=" + support::cpp11::to_string(block_shape));
build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(output->info()->dimension(idx_width)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("space_to_depth_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "space_to_depth_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
diff --git a/src/core/CL/kernels/CLStackLayerKernel.cpp b/src/core/CL/kernels/CLStackLayerKernel.cpp
index 2da35d4cdc..bc8645b381 100644
--- a/src/core/CL/kernels/CLStackLayerKernel.cpp
+++ b/src/core/CL/kernels/CLStackLayerKernel.cpp
@@ -82,6 +82,11 @@ CLStackLayerKernel::CLStackLayerKernel()
void CLStackLayerKernel::configure(const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, axis, idx_input, num_tensors, output);
+}
+
+void CLStackLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), axis, idx_input, num_tensors, output->info()));
@@ -99,7 +104,7 @@ void CLStackLayerKernel::configure(const ICLTensor *input, unsigned int axis, un
build_opts.add_option("-DDST_DIM3=" + support::cpp11::to_string(output->info()->dimension(3)));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("stack_layer", build_opts.options()));
+ _kernel = create_kernel(compile_context, "stack_layer", build_opts.options());
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
diff --git a/src/core/CL/kernels/CLStridedSliceKernel.cpp b/src/core/CL/kernels/CLStridedSliceKernel.cpp
index 63478f1002..99c0b0b312 100644
--- a/src/core/CL/kernels/CLStridedSliceKernel.cpp
+++ b/src/core/CL/kernels/CLStridedSliceKernel.cpp
@@ -102,6 +102,13 @@ void CLStridedSliceKernel::configure(const ICLTensor *input, ICLTensor *output,
const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
+}
+
+void CLStridedSliceKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
+ const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
+ int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));
@@ -157,7 +164,7 @@ void CLStridedSliceKernel::configure(const ICLTensor *input, ICLTensor *output,
"-DDST_DEPTH=1");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("strided_slice", build_opts.options()));
+ _kernel = create_kernel(compile_context, "strided_slice", build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = "strided_slice";
diff --git a/src/core/CL/kernels/CLTableLookupKernel.cpp b/src/core/CL/kernels/CLTableLookupKernel.cpp
index bbdaa37410..f6c6ffbae8 100644
--- a/src/core/CL/kernels/CLTableLookupKernel.cpp
+++ b/src/core/CL/kernels/CLTableLookupKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,6 +38,11 @@ using namespace arm_compute;
void CLTableLookupKernel::configure(const ICLTensor *input, const ICLLut *lut, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, lut, output);
+}
+
+void CLTableLookupKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLLut *lut, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON(lut == nullptr);
@@ -46,7 +51,7 @@ void CLTableLookupKernel::configure(const ICLTensor *input, const ICLLut *lut, I
// Create kernel
std::string kernel_name = (DataType::S16 == lut->type()) ? "tablelookup_S16" : "tablelookup_U8";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Set lut argument
unsigned int idx = 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLThresholdKernel.cpp b/src/core/CL/kernels/CLThresholdKernel.cpp
index 6e07cefc77..3a94faca4b 100644
--- a/src/core/CL/kernels/CLThresholdKernel.cpp
+++ b/src/core/CL/kernels/CLThresholdKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,6 +37,12 @@ using namespace arm_compute;
void CLThresholdKernel::configure(const ICLTensor *input, ICLTensor *output, uint8_t threshold,
uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, threshold, false_value, true_value, type, upper);
+}
+
+void CLThresholdKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, uint8_t threshold,
+ uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
@@ -57,7 +63,7 @@ void CLThresholdKernel::configure(const ICLTensor *input, ICLTensor *output, uin
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ _kernel = create_kernel(compile_context, kernel_name);
// Set arguments
unsigned int idx = 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
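
Across the patch create_kernel() is invoked in three shapes: with no build options (tablelookup, threshold above), with a hand-built std::set<std::string>, and with CLBuildOptions::options(). The declaration implied by those call sites might look as follows; this is an inference, not copied from a header, and CLBuildOptions::options() presumably returns the same set-of-strings type, so all three forms could resolve to a single defaulted signature:

cl::Kernel create_kernel(CLCompileContext &compile_context, const std::string &kernel_name,
                         const std::set<std::string> &build_opts = std::set<std::string>());

// The three call shapes seen in this diff:
//   _kernel = create_kernel(compile_context, kernel_name);                       // no options
//   _kernel = create_kernel(compile_context, "minmax", build_opts);              // std::set<std::string>
//   _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); // CLBuildOptions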
diff --git a/src/core/CL/kernels/CLTileKernel.cpp b/src/core/CL/kernels/CLTileKernel.cpp
index b205e3a8af..5db69d32e1 100644
--- a/src/core/CL/kernels/CLTileKernel.cpp
+++ b/src/core/CL/kernels/CLTileKernel.cpp
@@ -69,6 +69,11 @@ CLTileKernel::CLTileKernel()
void CLTileKernel::configure(const ICLTensor *input, ICLTensor *output, const Multiples &multiples)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, multiples);
+}
+
+void CLTileKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Auto initialize output
@@ -97,7 +102,7 @@ void CLTileKernel::configure(const ICLTensor *input, ICLTensor *output, const Mu
build_opts.add_option("-DDST_DEPTH=" + support::cpp11::to_string(output->info()->dimension(2)));
build_opts.add_option_if(multi_access_x, "-DOFFSET=" + support::cpp11::to_string(offset));
build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("tile", build_opts.options()));
+ _kernel = create_kernel(compile_context, "tile", build_opts.options());
// Configure window without padding
Window win = calculate_max_window(*output->info());
diff --git a/src/core/CL/kernels/CLTransposeKernel.cpp b/src/core/CL/kernels/CLTransposeKernel.cpp
index eb348fff74..37f07e65a4 100644
--- a/src/core/CL/kernels/CLTransposeKernel.cpp
+++ b/src/core/CL/kernels/CLTransposeKernel.cpp
@@ -108,6 +108,11 @@ Status CLTransposeKernel::validate(const ITensorInfo *input, const ITensorInfo *
void CLTransposeKernel::configure(const ICLTensor *input, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output);
+}
+
+void CLTransposeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Output tensor auto initialization if not yet initialized
@@ -123,7 +128,7 @@ void CLTransposeKernel::configure(const ICLTensor *input, ICLTensor *output)
data_type_in_bytes << input->info()->element_size();
build_opts.emplace("-DDATA_TYPE_IN_BYTES=" + data_type_in_bytes.str());
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("transpose", build_opts));
+ _kernel = create_kernel(compile_context, "transpose", build_opts);
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), output->info());
diff --git a/src/core/CL/kernels/CLUpsampleLayerKernel.cpp b/src/core/CL/kernels/CLUpsampleLayerKernel.cpp
index a061236d37..8df6d5dec4 100644
--- a/src/core/CL/kernels/CLUpsampleLayerKernel.cpp
+++ b/src/core/CL/kernels/CLUpsampleLayerKernel.cpp
@@ -66,6 +66,11 @@ Status CLUpsampleLayerKernel::validate(const ITensorInfo *input, const ITensorIn
void CLUpsampleLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &info, const InterpolationPolicy upsampling_policy)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, info, upsampling_policy);
+}
+
+void CLUpsampleLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &info, const InterpolationPolicy upsampling_policy)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_UNUSED(upsampling_policy);
@@ -126,7 +131,7 @@ void CLUpsampleLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
build_opts.add_option_if(multi_access_x, "-DVEC_SIZE_OUT=" + support::cpp11::to_string(num_elems_processed_per_iteration_x));
build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X_IN=" + support::cpp11::to_string(std::max<int>(_input->info()->dimension(0) - _num_elems_processed_per_iteration_input_x, 0)));
build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X_OUT=" + support::cpp11::to_string(std::max<int>(output_width_x - num_elems_processed_per_iteration_x, 0)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("upsample_layer_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
+ _kernel = create_kernel(compile_context, "upsample_layer_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
ICLKernel::configure_internal(win);
}
diff --git a/src/core/CL/kernels/CLWarpAffineKernel.cpp b/src/core/CL/kernels/CLWarpAffineKernel.cpp
index 4aaa49128c..43bd34fcea 100644
--- a/src/core/CL/kernels/CLWarpAffineKernel.cpp
+++ b/src/core/CL/kernels/CLWarpAffineKernel.cpp
@@ -61,6 +61,11 @@ BorderSize CLWarpAffineKernel::border_size() const
void CLWarpAffineKernel::configure(const ICLTensor *input, ICLTensor *output, const std::array<float, 9> &matrix, InterpolationPolicy policy)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, matrix, policy);
+}
+
+void CLWarpAffineKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array<float, 9> &matrix, InterpolationPolicy policy)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(InterpolationPolicy::AREA == policy);
@@ -77,7 +82,7 @@ void CLWarpAffineKernel::configure(const ICLTensor *input, ICLTensor *output, co
std::string interpolation_name = string_from_interpolation_policy(policy);
std::transform(interpolation_name.begin(), interpolation_name.end(), interpolation_name.begin(), ::tolower);
const std::string kernel_name = "warp_affine_" + interpolation_name;
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, options));
+ _kernel = create_kernel(compile_context, kernel_name, options);
// Set static kernel arguments
unsigned int idx = 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp b/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp
index e537aec058..3c47567203 100644
--- a/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp
+++ b/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -60,6 +60,11 @@ BorderSize CLWarpPerspectiveKernel::border_size() const
void CLWarpPerspectiveKernel::configure(const ICLTensor *input, ICLTensor *output, const std::array<float, 9> &matrix, InterpolationPolicy policy)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, matrix, policy);
+}
+
+void CLWarpPerspectiveKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array<float, 9> &matrix, InterpolationPolicy policy)
+{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(InterpolationPolicy::AREA == policy);
@@ -76,7 +81,7 @@ void CLWarpPerspectiveKernel::configure(const ICLTensor *input, ICLTensor *outpu
std::string interpolation_name = string_from_interpolation_policy(policy);
std::transform(interpolation_name.begin(), interpolation_name.end(), interpolation_name.begin(), ::tolower);
std::string kernel_name = "warp_perspective_" + interpolation_name;
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, options));
+ _kernel = create_kernel(compile_context, kernel_name, options);
// Set static kernel arguments
unsigned int idx = 2 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
index 373cbe51ba..a0db660414 100644
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
+++ b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
@@ -79,6 +79,11 @@ CLWeightsReshapeKernel::CLWeightsReshapeKernel()
void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, biases, output, num_groups);
+}
+
+void CLWeightsReshapeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Output tensor auto initialization if not yet initialized
@@ -102,7 +107,7 @@ void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *
build_opts.add_option_if(biases != nullptr, "-DHAS_BIAS");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reshape_to_columns", build_opts.options()));
+ _kernel = create_kernel(compile_context, "reshape_to_columns", build_opts.options());
// Configure window
Window win = calculate_max_window(*input->info(), Steps());
diff --git a/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp b/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp
index 4d52f09b48..ea549e9f46 100644
--- a/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp
+++ b/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp
@@ -96,6 +96,11 @@ Status CLWidthConcatenate2TensorsKernel::validate(const ITensorInfo *input1, con
void CLWidthConcatenate2TensorsKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
+}
+
+void CLWidthConcatenate2TensorsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info()));
@@ -128,7 +133,7 @@ void CLWidthConcatenate2TensorsKernel::configure(const ICLTensor *input1, const
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("concatenate_width_x2", build_opts.options()));
+ _kernel = create_kernel(compile_context, "concatenate_width_x2", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
diff --git a/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp b/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp
index 0673365d83..e1ec9d1344 100644
--- a/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp
+++ b/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp
@@ -114,6 +114,12 @@ Status CLWidthConcatenate4TensorsKernel::validate(const ITensorInfo *input1, con
void CLWidthConcatenate4TensorsKernel::configure(const ICLTensor *input1, const ICLTensor *input2, const ICLTensor *input3, const ICLTensor *input4, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input1, input2, input3, input4, output);
+}
+
+void CLWidthConcatenate4TensorsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, const ICLTensor *input3, const ICLTensor *input4,
+ ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, input3, input4, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), input3->info(), input4->info(), output->info()));
@@ -156,7 +162,7 @@ void CLWidthConcatenate4TensorsKernel::configure(const ICLTensor *input1, const
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("concatenate_width_x4", build_opts.options()));
+ _kernel = create_kernel(compile_context, "concatenate_width_x4", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input1->info(), input2->info(), input3->info(), input4->info(), output->info());
diff --git a/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp
index eaefe666f2..9ff373b18d 100644
--- a/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp
@@ -92,6 +92,11 @@ Status CLWidthConcatenateLayerKernel::validate(const ITensorInfo *input, unsigne
void CLWidthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int width_offset, ICLTensor *output)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, width_offset, output);
+}
+
+void CLWidthConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int width_offset, ICLTensor *output)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), width_offset, output->info()));
@@ -118,7 +123,7 @@ void CLWidthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i
}
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("concatenate_width", build_opts.options()));
+ _kernel = create_kernel(compile_context, "concatenate_width", build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), width_offset, output->info());
ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
index b4135f26d5..38649126b7 100644
--- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
@@ -101,6 +101,11 @@ CLWinogradFilterTransformKernel::CLWinogradFilterTransformKernel()
void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
+}
+
+void CLWinogradFilterTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Output auto initialization if not yet initialized
@@ -119,7 +124,7 @@ void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTenso
// Create kernel
std::string kernel_name = "winograd_filter_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(input->info()->data_layout()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
_input = input;
_output = output;
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
index 5b76a3d24f..cf882ae9ac 100644
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
@@ -110,6 +110,11 @@ BorderSize CLWinogradInputTransformKernel::border_size() const
void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
+}
+
+void CLWinogradInputTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
@@ -195,7 +200,7 @@ void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor
kernel_name += support::cpp11::to_string(_step_z);
kernel_name += "_" + lower_string(string_from_data_layout(_data_layout));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Create window and update padding
auto win_config = validate_and_configure_window(input->info(), output->info(), winograd_info);
diff --git a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
index 0bd3b28d73..f08b5ac7c8 100644
--- a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
@@ -137,6 +137,12 @@ CLWinogradOutputTransformKernel::CLWinogradOutputTransformKernel()
void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, winograd_info, act_info);
+}
+
+void CLWinogradOutputTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info,
+ const ActivationLayerInfo &act_info)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Output tensor auto initialization if not yet initialized
@@ -188,7 +194,7 @@ void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const IC
// Create kernel
std::string kernel_name = "winograd_output_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(winograd_info.output_data_layout));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), winograd_info.output_tile_size);
diff --git a/src/core/CL/kernels/CLYOLOLayerKernel.cpp b/src/core/CL/kernels/CLYOLOLayerKernel.cpp
index f634d912eb..ee119233a4 100644
--- a/src/core/CL/kernels/CLYOLOLayerKernel.cpp
+++ b/src/core/CL/kernels/CLYOLOLayerKernel.cpp
@@ -102,6 +102,11 @@ CLYOLOLayerKernel::CLYOLOLayerKernel()
void CLYOLOLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes)
{
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, act_info, num_classes);
+}
+
+void CLYOLOLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes)
+{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
_run_in_place = (output == nullptr) || (output == input);
@@ -127,7 +132,7 @@ void CLYOLOLayerKernel::configure(ICLTensor *input, ICLTensor *output, const Act
// Create kernel
std::string kernel_name = std::string("yolo_layer_") + lower_string(string_from_data_layout(input->info()->data_layout()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Make sure _kernel is initialized before calling the parent's configure
_input = input;
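
With the new overloads in place, a caller can supply its own compile context rather than relying on the CLKernelLibrary singleton. A hedged usage sketch for one of the kernels above (tensor allocation and the affine matrix values are assumed to be set up elsewhere):

// Minimal usage sketch; src and dst are assumed to be allocated U8 CLTensors.
CLCompileContext &ctx = CLKernelLibrary::get().get_compile_context(); // or a user-managed context
std::array<float, 9> matrix = { /* affine transform coefficients */ };
CLWarpAffineKernel kernel;
kernel.configure(ctx, &src, &dst, matrix, InterpolationPolicy::NEAREST_NEIGHBOR);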