author     Manuel Bottini <manuel.bottini@arm.com>    2020-04-21 13:29:30 +0100
committer  Manuel Bottini <manuel.bottini@arm.com>    2020-04-22 08:57:15 +0000
commit     256c0b9d5d66d35d52c7eee3599a4d91e7887ec8 (patch)
tree       6bb2bd6648ed3a7292d63c731a679bb7cb2cf3d3
parent     7ba240bb54bdbd50f1809f944dde2fac7005cc10 (diff)
download   ComputeLibrary-256c0b9d5d66d35d52c7eee3599a4d91e7887ec8.tar.gz
COMPMID-3280: Make all ML primitives for CL use the new interface - Part1 - Fix1
- const fix in the CLKernels part 1

Change-Id: I17340cb6ff26afd52b14b46645efedbe07aef1b6
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3067
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
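The functional content of the patch is a single const-correctness fix repeated across the CL kernels: every configure() overload that takes a CLCompileContext, and the create_kernel() helper, now accept the context by const reference. A minimal caller-side sketch of what this enables follows; it is illustrative only and not part of the patch (the wrapper function name is made up, and tensor setup is assumed to happen elsewhere):

    // Sketch: with the const overloads, a caller holding only read access to the
    // compile context can still configure a kernel. The configure() signature is
    // the one changed by this patch in CLBitwiseNotKernel.h; everything else is assumed.
    #include "arm_compute/core/CL/ICLTensor.h"
    #include "arm_compute/core/CL/kernels/CLBitwiseNotKernel.h"

    void configure_bitwise_not(const arm_compute::CLCompileContext &compile_context, // const is now sufficient
                               const arm_compute::ICLTensor        *input,
                               arm_compute::ICLTensor              *output)
    {
        arm_compute::CLBitwiseNotKernel kernel;
        kernel.configure(compile_context, input, output); // binds to const CLCompileContext &
    }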
-rw-r--r--  arm_compute/core/CL/CLHelpers.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLAccumulateKernel.h | 6
-rw-r--r--  arm_compute/core/CL/kernels/CLActivationLayerKernel.h | 8
-rw-r--r--  arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h | 5
-rw-r--r--  arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLBitwiseAndKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLBitwiseNotKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLBitwiseOrKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLBitwiseXorKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLBox3x3Kernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLCannyEdgeKernel.h | 6
-rw-r--r--  arm_compute/core/CL/kernels/CLChannelCombineKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLChannelExtractKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLCol2ImKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLColorConvertKernel.h | 8
-rw-r--r--  arm_compute/core/CL/kernels/CLComparisonKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLConvolutionKernel.h | 8
-rw-r--r--  arm_compute/core/CL/kernels/CLCopyKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLCropKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDerivativeKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDilateKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h | 6
-rw-r--r--  arm_compute/core/CL/kernels/CLErodeKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLFFTScaleKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLFastCornersKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLFillBorderKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLFlattenLayerKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLFloorKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h | 2
-rw-r--r--  arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h | 2
-rw-r--r--  src/core/CL/CLHelpers.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLAccumulateKernel.cpp | 6
-rw-r--r--  src/core/CL/kernels/CLActivationLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp | 4
-rw-r--r--  src/core/CL/kernels/CLBitwiseAndKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBitwiseNotKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBitwiseOrKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBitwiseXorKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLBox3x3Kernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLCannyEdgeKernel.cpp | 6
-rw-r--r--  src/core/CL/kernels/CLChannelCombineKernel.cpp | 4
-rw-r--r--  src/core/CL/kernels/CLChannelExtractKernel.cpp | 4
-rw-r--r--  src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLCol2ImKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLColorConvertKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLComparisonKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLConvolutionKernel.cpp | 8
-rw-r--r--  src/core/CL/kernels/CLCopyKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLCropKernel.cpp | 4
-rw-r--r--  src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDepthConvertLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDequantizationLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDerivativeKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDilateKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLElementwiseOperationKernel.cpp | 6
-rw-r--r--  src/core/CL/kernels/CLErodeKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLFFTDigitReverseKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLFFTRadixStageKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLFFTScaleKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLFastCornersKernel.cpp | 4
-rw-r--r--  src/core/CL/kernels/CLFillBorderKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLFlattenLayerKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLFloorKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp | 2
99 files changed, 136 insertions, 137 deletions
diff --git a/arm_compute/core/CL/CLHelpers.h b/arm_compute/core/CL/CLHelpers.h
index 77c17c7d9c..c5db66c664 100644
--- a/arm_compute/core/CL/CLHelpers.h
+++ b/arm_compute/core/CL/CLHelpers.h
@@ -206,7 +206,7 @@ cl::Kernel create_opencl_kernel(CLCoreRuntimeContext *ctx, const std::string &ke
*
* @return An opencl kernel
*/
-cl::Kernel create_kernel(CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts = std::set<std::string>());
+cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts = std::set<std::string>());
/** Creates a suitable LWS hint object for parallel implementations. Sets the number of WG based on the input size.
* If input width is smaller than 128 we can use fewer threads than 8.
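The same pattern applies to the helper above: create_kernel() can now be invoked through a const CLCompileContext reference. A hedged usage sketch follows, where the kernel name and build option are placeholders not taken from this patch:

    #include <set>
    #include <string>
    #include "arm_compute/core/CL/CLHelpers.h"

    // Sketch: build an OpenCL kernel from a read-only compile context.
    // "my_kernel" and the -DDATA_TYPE build option are illustrative placeholders.
    cl::Kernel build_kernel(const arm_compute::CLCompileContext &ctx)
    {
        const std::set<std::string> build_opts{ "-DDATA_TYPE=float" };
        return arm_compute::create_kernel(ctx, "my_kernel", build_opts);
    }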
diff --git a/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h b/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h
index 18896725e2..58dea3bdae 100644
--- a/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h
+++ b/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8/S16.
* @param[out] output Destination tensor. Data types supported: U8/S16.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLAccumulateKernel.h b/arm_compute/core/CL/kernels/CLAccumulateKernel.h
index d7cb09fdd3..f639148e25 100644
--- a/arm_compute/core/CL/kernels/CLAccumulateKernel.h
+++ b/arm_compute/core/CL/kernels/CLAccumulateKernel.h
@@ -52,7 +52,7 @@ public:
* @param[in] input Source tensor. Data types supported: U8.
* @param[out] accum Destination tensor. Data types supported: S16.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum);
};
/** Interface for the accumulate weighted kernel.
@@ -81,7 +81,7 @@ public:
* @param[in] alpha Scalar value in the range [0, 1.0]. Data types supported: F32.
* @param[in,out] accum Accumulated tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum);
};
/** Interface for the accumulate squared kernel.
@@ -108,7 +108,7 @@ public:
* @param[in] shift Shift value in the range of [0, 15]. Data types supported: U32.
* @param[in,out] accum Accumulated tensor. Data types supported: S16.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum);
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLACCUMULATEKERNEL_H */
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index d25480cd60..1e83a689cd 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -65,7 +65,7 @@ public:
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayerKernel
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
@@ -81,9 +81,9 @@ public:
void run(const Window &window, cl::CommandQueue &queue) override;
private:
- ICLTensor *_input;
- ICLTensor *_output;
- bool _run_in_place;
+ ICLTensor *_input;
+ ICLTensor *_output;
+ bool _run_in_place;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLACTIVATIONLAYERKERNEL_H */
diff --git a/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h b/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
index 831cee5e58..94e8baed13 100644
--- a/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
@@ -76,7 +76,7 @@ public:
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
* @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op);
/** Static function to check if given info will lead to a valid configuration of @ref CLArgMinMaxLayerKernel.
*
diff --git a/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h
index 06764302f4..163666853c 100644
--- a/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h
@@ -72,7 +72,7 @@ public:
* @note: The gaps between the two lowest dimensions of input and output need to be divisible by 2.
*
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int batch_offset, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int batch_offset, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchConcatenateLayerKernel
*
* @param[in] input Input tensor info. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 564b21680b..8eaaca845a 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -81,9 +81,8 @@ public:
* @param[in] epsilon (Optional) Small value to avoid division with zero. Default value is 0.001f.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr, const ICLTensor *gamma = nullptr,
- float epsilon = 0.001f,
- ActivationLayerInfo act_info = ActivationLayerInfo());
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr,
+ const ICLTensor *gamma = nullptr, float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayerKernel
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
diff --git a/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h
index f9289eab73..2b12ad094a 100644
--- a/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h
@@ -61,7 +61,7 @@ public:
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
/** Initialise the kernel's inputs and output (Static block shape).
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -78,7 +78,7 @@ public:
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchToSpaceLayerKernel
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h b/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h
index 6c60bc0f33..8defe32862 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h
@@ -62,7 +62,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h b/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h
index 0522841e73..b86ce7f173 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h
@@ -50,7 +50,7 @@ public:
* @param[in] input Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLBITWISENOTKERNEL_H */
diff --git a/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h b/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h
index 151f19d374..65eb50f0fd 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h
@@ -62,7 +62,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h b/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h
index 03c1e05da4..5c63a7f22c 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h
@@ -62,7 +62,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h b/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h
index ffa63bd5a4..bbe11562ed 100644
--- a/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h
+++ b/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h
@@ -71,7 +71,7 @@ public:
* @note Only single image prediction is supported. Height and Width (and scale) of the image will be contained in the BoundingBoxTransformInfo struct.
*
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLBoundingBoxTransform
*
diff --git a/arm_compute/core/CL/kernels/CLBox3x3Kernel.h b/arm_compute/core/CL/kernels/CLBox3x3Kernel.h
index 572ae87d9a..ea3c1c1f3e 100644
--- a/arm_compute/core/CL/kernels/CLBox3x3Kernel.h
+++ b/arm_compute/core/CL/kernels/CLBox3x3Kernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output The output tensor. Data types supported: U8.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
//Inherited methods overriden:
BorderSize border_size() const override;
diff --git a/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h b/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h
index 67c23dd811..40ad4dcd84 100644
--- a/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h
+++ b/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h
@@ -65,7 +65,7 @@ public:
* @param[out] phase Destination tensor - Quantized phase. Data types supported: U8.
* @param[in] norm_type Normalization type. if 1, L1-Norm otherwise L2-Norm.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -111,7 +111,7 @@ public:
* @param[in] lower_thr Lower threshold.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -167,7 +167,7 @@ public:
* @param[in,out] l1_stack_counter Tensor for counting the elements in the L1 stack of each pixel. Data types supported: U8.
* Expected to be initialized to 0 before each run.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
ICLTensor *visited, ICLTensor *recorded, ICLTensor *l1_stack, ICLTensor *l1_stack_counter);
// Inherited methods overridden:
diff --git a/arm_compute/core/CL/kernels/CLChannelCombineKernel.h b/arm_compute/core/CL/kernels/CLChannelCombineKernel.h
index 60d0bd4a45..32ddf152c3 100644
--- a/arm_compute/core/CL/kernels/CLChannelCombineKernel.h
+++ b/arm_compute/core/CL/kernels/CLChannelCombineKernel.h
@@ -69,7 +69,7 @@ public:
* @param[in] plane3 The 2D plane that forms channel 3. Must be of U8 format.
* @param[out] output The single planar output tensor.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output);
/** Configure function's inputs and outputs.
*
* @param[in] plane0 The 2D plane that forms channel 0. Must be of U8 format.
@@ -86,7 +86,7 @@ public:
* @param[in] plane2 The 2D plane that forms channel 2. Must be of U8 format.
* @param[out] output The multi planar output tensor.
*/
- void configure(CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLChannelExtractKernel.h b/arm_compute/core/CL/kernels/CLChannelExtractKernel.h
index 1f2cc8900a..6a0c4bb94e 100644
--- a/arm_compute/core/CL/kernels/CLChannelExtractKernel.h
+++ b/arm_compute/core/CL/kernels/CLChannelExtractKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] channel Channel to extract.
* @param[out] output Destination tensor. Must be of U8 format.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output);
/** Set the input and output of the kernel
*
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV/YUV444
@@ -80,7 +80,7 @@ public:
* @param[in] channel Channel to extract.
* @param[out] output Single-planar 2D destination image. Must be of U8 format.
*/
- void configure(CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h b/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
index 921c20df10..14b59d325f 100644
--- a/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
@@ -60,7 +60,7 @@ public:
* @param[out] output Output tensor. Data type supported: Same as @p input
* @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups);
/** Static function to check if given info will lead to a valid configuration of @ref CLChannelShuffleLayerKernel
*
* @param[in] input Input tensor info. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLCol2ImKernel.h b/arm_compute/core/CL/kernels/CLCol2ImKernel.h
index 6ef424853e..d0528ed21a 100644
--- a/arm_compute/core/CL/kernels/CLCol2ImKernel.h
+++ b/arm_compute/core/CL/kernels/CLCol2ImKernel.h
@@ -81,7 +81,7 @@ public:
* @param[in] convolved_dims Output convolved dimensions.
* @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups = 1);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLCol2ImKernel
*
* @param[in] input The input tensor to convert. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
diff --git a/arm_compute/core/CL/kernels/CLColorConvertKernel.h b/arm_compute/core/CL/kernels/CLColorConvertKernel.h
index 25b95eb42c..2bcd141863 100644
--- a/arm_compute/core/CL/kernels/CLColorConvertKernel.h
+++ b/arm_compute/core/CL/kernels/CLColorConvertKernel.h
@@ -67,7 +67,7 @@ public:
* RGBA8888 (if the formats of @p input are UYVY422/YUYV422/RGB888/),
* U8 (if the formats of @p input is RGB888)
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Set the input and output of the kernel
*
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
@@ -80,7 +80,7 @@ public:
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
* @param[out] output Single-planar destination image. Formats supported: RGB888/RGBA8888
*/
- void configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output);
/** Set the input and output of the kernel
*
* @param[in] input Single-planar source image. Formats supported: RGB888/RGBA8888/UYVY422/YUYV422
@@ -93,7 +93,7 @@ public:
* @param[in] input Single-planar source image. Formats supported: RGB888/RGBA8888/UYVY422/YUYV422
* @param[out] output Multi-planar destination image. Formats supported: NV12/IYUV/YUV444 (if the formats of @p input are RGB888/RGB8888)
*/
- void configure(CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output);
/** Set the input and output of the kernel
*
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
@@ -106,7 +106,7 @@ public:
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
* @param[out] output Multi-planar destination image. Formats supported: YUV444/IYUV (if the formats of @p input are NV12/NV21)/NV12 (if the format of @p input is IYUV)
*/
- void configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLComparisonKernel.h b/arm_compute/core/CL/kernels/CLComparisonKernel.h
index 15779938b2..d5c5297c61 100644
--- a/arm_compute/core/CL/kernels/CLComparisonKernel.h
+++ b/arm_compute/core/CL/kernels/CLComparisonKernel.h
@@ -64,7 +64,7 @@ public:
* @param[out] output Destination tensor. Data types supported: U8.
* @param[in] operation Comparison operation to use.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation);
/** Static function to check if given info will lead to a valid configuration of @ref CLComparisonKernel
*
* @param[in] input1 Source tensor. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
index f7e212e1e4..d3e57a6738 100644
--- a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
@@ -69,7 +69,7 @@ public:
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref CLConvertFullyConnectedWeightsKernel
*
* @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLConvolutionKernel.h b/arm_compute/core/CL/kernels/CLConvolutionKernel.h
index e1cdc88007..b6fe51dbaa 100644
--- a/arm_compute/core/CL/kernels/CLConvolutionKernel.h
+++ b/arm_compute/core/CL/kernels/CLConvolutionKernel.h
@@ -70,7 +70,7 @@ public:
* @param[in] scale Scale of the convolution matrix. If 0 is passed, it will be set to the sum of the coefficients of the convolution or 1 if they add up to 0.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
@@ -112,7 +112,7 @@ public:
* @param[in] conv Convolution matrix to apply to the input tensor.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
@@ -153,7 +153,7 @@ public:
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
* @param[in] data_type Data type to use for intermeidate result. @sa data_type_for_convolution
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined, DataType data_type = DataType::S32);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined, DataType data_type = DataType::S32);
// Inherited methods overridden:
BorderSize border_size() const override;
@@ -209,7 +209,7 @@ public:
* @param[in] scale Scale of the convolution matrix. If 0 is passed, it will be set to the sum of the coefficients of the convolution or 1 if they add up to 0.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale, bool border_undefined);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLCopyKernel.h b/arm_compute/core/CL/kernels/CLCopyKernel.h
index 1774f8ccad..05dff8ed0c 100644
--- a/arm_compute/core/CL/kernels/CLCopyKernel.h
+++ b/arm_compute/core/CL/kernels/CLCopyKernel.h
@@ -61,7 +61,7 @@ public:
* @param[in] padding (Optional) Padding to be applied to the input tensor
* @param[in] output_window (Optional) Window to be used in case only copying into part of a tensor. Default is nullptr.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding = PaddingList(), Window *output_window = nullptr);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding = PaddingList(), Window *output_window = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLCopyKernel
*
* @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
diff --git a/arm_compute/core/CL/kernels/CLCropKernel.h b/arm_compute/core/CL/kernels/CLCropKernel.h
index 103986a5f8..a1c6f901eb 100644
--- a/arm_compute/core/CL/kernels/CLCropKernel.h
+++ b/arm_compute/core/CL/kernels/CLCropKernel.h
@@ -71,7 +71,7 @@ public:
* @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0.
* @param[in] output_window Output window to be used in case cropped image is being copied into a tensor. Default is nullptr.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0,
Window *output_window = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLStridedSliceKernel
diff --git a/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h b/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
index 7e8a45fd8f..0c65f519cc 100644
--- a/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
+++ b/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
@@ -62,7 +62,7 @@ public:
* @param[out] output Destination tensor. Data types supported: same as @p input. All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
* @param[in] info Contains padding and stride information described in @ref PadStrideInfo.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PadStrideInfo &info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PadStrideInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayerUpsample
*
* @param[in] input Source tensor info. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
index daeb8c1f9c..292c561e46 100644
--- a/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
+++ b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
@@ -79,7 +79,7 @@ public:
* @param[in] weights_info Deconvolution weights tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
* @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width && stride_y = weights.height. Moreover, padding is not supported.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
const PadStrideInfo &deconv_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionReshapeOutputKernel.
diff --git a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
index 7b594417d6..5fe826d090 100644
--- a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
@@ -72,7 +72,7 @@ public:
* @note: The gaps between the two lowest dimensions of input and output need to be divisible by 2.
*
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int depth_offset, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int depth_offset, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthConcatenateLayerKernel
*
* @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
diff --git a/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
index 8bbf9b3dce..66eb6222b2 100644
--- a/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
@@ -75,7 +75,7 @@ public:
* @param[in] policy Conversion policy
* @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthConvertLayerKernel
*
* @param[in] input Source tensor info. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
diff --git a/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h
index 541506b521..87ac3c1ec1 100644
--- a/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h
@@ -61,7 +61,7 @@ public:
* @param[out] output Tensor output. Data types supported: same as @p input
* @param[in] block_shape Block shape value.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthToSpaceLayerKernel.
*
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
index f68fde4737..6cf0326467 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
@@ -75,7 +75,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NCHWKernel
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
index f9fda0a42c..e564cf6fe0 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
@@ -76,7 +76,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
index db26b4a06f..8847cf9c46 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
@@ -87,7 +87,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
index e7fc6f8d81..8dc5d32e4f 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
@@ -59,7 +59,7 @@ public:
* @param[out] output The output tensor of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
* @param[in] info Depthwise convolution information to reshape the input tensor.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
*
diff --git a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
index 4cb1339300..bb154f1a5b 100644
--- a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
@@ -58,7 +58,7 @@ public:
* @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] output Destination tensor. Data types supported: F16/F32.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayerKernel
*
* @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
diff --git a/arm_compute/core/CL/kernels/CLDerivativeKernel.h b/arm_compute/core/CL/kernels/CLDerivativeKernel.h
index 5d5ad860f3..cd8ae90c2d 100644
--- a/arm_compute/core/CL/kernels/CLDerivativeKernel.h
+++ b/arm_compute/core/CL/kernels/CLDerivativeKernel.h
@@ -66,7 +66,7 @@ public:
* @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S16.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLDilateKernel.h b/arm_compute/core/CL/kernels/CLDilateKernel.h
index 9c41a84b31..45f5fe0764 100644
--- a/arm_compute/core/CL/kernels/CLDilateKernel.h
+++ b/arm_compute/core/CL/kernels/CLDilateKernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output The output tensor. Data types supported: U8.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
index f1409b6339..489d7c27c5 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
@@ -88,7 +88,7 @@ public:
* The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerKernel
*
* @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
diff --git a/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h b/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h
index 1f76992b96..e190bdebbe 100644
--- a/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
* @param[in] op Element wise unary operation to perform.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op);
/** Static function to check if given info will lead to a valid configuration of @ref CLElementWiseUnaryLayerKernel
*
* @param[in] input First tensor input info. Data types supported: F16/F32.
diff --git a/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
index 2f1060126a..4d3d4bc834 100644
--- a/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
+++ b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
@@ -99,7 +99,7 @@ protected:
/** Commmon configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff)
*
*/
- void configure_common(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure_common(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
ActivationLayerInfo _act_info;
@@ -138,7 +138,7 @@ public:
* @param[in] policy Policy to use to handle overflow.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy,
+ void configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy,
const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel
@@ -194,7 +194,7 @@ public:
* @param[in] output Output tensor. Data types supported: Same as @p input1.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
+ void configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel
diff --git a/arm_compute/core/CL/kernels/CLErodeKernel.h b/arm_compute/core/CL/kernels/CLErodeKernel.h
index 8ba6ff8408..cbc748194c 100644
--- a/arm_compute/core/CL/kernels/CLErodeKernel.h
+++ b/arm_compute/core/CL/kernels/CLErodeKernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output The output tensor. Data types supported: U8.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
diff --git a/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h b/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
index eac03ff868..a8da1246bb 100644
--- a/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] idx Digit reverse index tensor. Data type supported: U32
* @param[in] config Kernel configuration.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTDigitReverseKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
diff --git a/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h b/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
index 85bf4cce66..e3f53462d9 100644
--- a/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
@@ -69,7 +69,7 @@ public:
* @param[out] output Destination tensor. Can be nullptr. Data type supported: same as @p input
* @param[in] config FFT descriptor metadata.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTRadixStageKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
diff --git a/arm_compute/core/CL/kernels/CLFFTScaleKernel.h b/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
index cd4fe58b9c..d0d2b7613c 100644
--- a/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
@@ -63,7 +63,7 @@ public:
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] config Kernel configuration
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTScaleKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
diff --git a/arm_compute/core/CL/kernels/CLFastCornersKernel.h b/arm_compute/core/CL/kernels/CLFastCornersKernel.h
index 2a6102036f..1a0d4e36a5 100644
--- a/arm_compute/core/CL/kernels/CLFastCornersKernel.h
+++ b/arm_compute/core/CL/kernels/CLFastCornersKernel.h
@@ -75,7 +75,7 @@ public:
* @param[in] non_max_suppression True if non-maxima suppression is applied, false otherwise.
* @param[in] border_mode Strategy to use for borders.
*/
- void configure(CLCompileContext &compile_context, const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode);
+ void configure(const CLCompileContext &compile_context, const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode);
// Inherited methods overridden
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -119,7 +119,7 @@ public:
* @param[out] corners Array of keypoints to store the results.
* @param[out] num_buffers Number of keypoints to store the results.
*/
- void configure(CLCompileContext &compile_context, const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers);
+ void configure(const CLCompileContext &compile_context, const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLFillBorderKernel.h b/arm_compute/core/CL/kernels/CLFillBorderKernel.h
index 226b611bcb..d00ea55a83 100644
--- a/arm_compute/core/CL/kernels/CLFillBorderKernel.h
+++ b/arm_compute/core/CL/kernels/CLFillBorderKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] border_mode Border mode to use for the convolution.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue());
+ void configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue());
/** Function to set the constant value on fill border kernel depending on type.
*
diff --git a/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h b/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h
index b795e03a34..ab009e1aa8 100644
--- a/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h
@@ -60,7 +60,7 @@ public:
* @param[out] output Output tensor with shape [w*h*d, input_batches] where:
* w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLFlattenLayerKernel
*
* @param[in] input First input tensor to flatten with at least 3 dimensions.
diff --git a/arm_compute/core/CL/kernels/CLFloorKernel.h b/arm_compute/core/CL/kernels/CLFloorKernel.h
index a3ccb96c61..4d1ed789db 100644
--- a/arm_compute/core/CL/kernels/CLFloorKernel.h
+++ b/arm_compute/core/CL/kernels/CLFloorKernel.h
@@ -59,7 +59,7 @@ public:
* @param[in] input Source tensor. Data type supported: F16/F32.
* @param[out] output Destination tensor. Same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLFloorKernel
*
diff --git a/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h b/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h
index 2d62a576bb..2fe6b223ca 100644
--- a/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h
+++ b/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h
@@ -81,7 +81,7 @@ public:
* @param[in] epsilon (Optional) Batch normalization layer epsilon parameter. Defaults to 0.001f.
* @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to CONVOLUTION.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, ICLTensor *fused_weights, ICLTensor *fused_bias,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, ICLTensor *fused_weights, ICLTensor *fused_bias,
const ICLTensor *input_bias = nullptr, const ICLTensor *bn_beta = nullptr, const ICLTensor *bn_gamma = nullptr,
float epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
/** Static function to check if given info will lead to a valid configuration of @ref CLFuseBatchNormalizationKernel
diff --git a/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h b/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
index 040ca157de..f0f7754960 100644
--- a/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
+++ b/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
@@ -86,7 +86,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- virtual void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ virtual void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) = 0;
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 7d1b0ea6c7..15f45d52d9 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -384,7 +384,7 @@ cl::Kernel create_opencl_kernel(CLCoreRuntimeContext *ctx, const std::string &ke
}
}
-cl::Kernel create_kernel(CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts)
+cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts)
{
const std::string program_name = CLKernelLibrary::get().get_program_name(kernel_name);
std::pair<std::string, bool> kernel_src = CLKernelLibrary::get().get_program(program_name);
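Note (editorial, not part of the patch): create_kernel() itself now accepts the compile context by const reference, so a kernel's configure() overload can forward the context it receives without any cast. A hedged sketch of that forwarding pattern follows; the OpenCL kernel name, the build option and the header path are assumptions made for illustration.

    // Sketch only: forwarding a const compile context into create_kernel().
    #include <set>
    #include <string>

    #include "arm_compute/core/CL/CLHelpers.h" // assumed location of the create_kernel() declaration

    namespace arm_compute
    {
    cl::Kernel build_example_kernel(const CLCompileContext &compile_context)
    {
        const std::set<std::string> build_opts{ "-DDATA_TYPE=uchar" }; // hypothetical build option
        // The const reference is passed straight through; no const_cast is needed
        // now that create_kernel() takes const CLCompileContext &.
        return create_kernel(compile_context, "example_kernel", build_opts);
    }
    } // namespace arm_compute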
diff --git a/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp b/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp
index 52ca1d1710..5b03fb56e7 100644
--- a/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp
+++ b/src/core/CL/kernels/CLAbsoluteDifferenceKernel.cpp
@@ -49,7 +49,7 @@ void CLAbsoluteDifferenceKernel::configure(const ICLTensor *input1, const ICLTen
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
}
-void CLAbsoluteDifferenceKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLAbsoluteDifferenceKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::S16);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::S16);
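Note (editorial, not part of the patch): the .cpp hunks that follow all repeat the pattern visible here: the legacy overload without a context forwards to the context-taking overload via CLKernelLibrary::get().get_compile_context(), and only the latter's signature gains the const qualifier. The two call styles a user ends up with are sketched below; header paths and variable names are illustrative assumptions and tensor allocation is omitted.

    // Sketch only: the two configure() call styles after this change.
    #include "arm_compute/core/CL/CLKernelLibrary.h"                     // assumed header locations
    #include "arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h"

    using namespace arm_compute;

    void configure_both_ways(const ICLTensor *in1, const ICLTensor *in2, ICLTensor *out)
    {
        CLAbsoluteDifferenceKernel k_legacy;
        k_legacy.configure(in1, in2, out); // legacy overload: uses the library's default compile context internally

        const CLCompileContext &ctx = CLKernelLibrary::get().get_compile_context();
        CLAbsoluteDifferenceKernel k_explicit;
        k_explicit.configure(ctx, in1, in2, out); // new overload: context supplied explicitly, now by const reference
    }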
diff --git a/src/core/CL/kernels/CLAccumulateKernel.cpp b/src/core/CL/kernels/CLAccumulateKernel.cpp
index aa13b4a207..a7dfcdca9b 100644
--- a/src/core/CL/kernels/CLAccumulateKernel.cpp
+++ b/src/core/CL/kernels/CLAccumulateKernel.cpp
@@ -43,7 +43,7 @@ void CLAccumulateKernel::configure(const ICLTensor *input, ICLTensor *accum)
configure(CLKernelLibrary::get().get_compile_context(), input, accum);
}
-void CLAccumulateKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum)
+void CLAccumulateKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::S16);
@@ -60,7 +60,7 @@ void CLAccumulateWeightedKernel::configure(const ICLTensor *input, float alpha,
configure(CLKernelLibrary::get().get_compile_context(), input, alpha, accum);
}
-void CLAccumulateWeightedKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum)
+void CLAccumulateWeightedKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::U8);
@@ -82,7 +82,7 @@ void CLAccumulateSquaredKernel::configure(const ICLTensor *input, uint32_t shift
configure(CLKernelLibrary::get().get_compile_context(), input, shift, accum);
}
-void CLAccumulateSquaredKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum)
+void CLAccumulateSquaredKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::S16);
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 15ae8e3d04..d40e9a15be 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -125,7 +125,7 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
configure(CLKernelLibrary::get().get_compile_context(), input, output, act_info);
}
-void CLActivationLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info)
+void CLActivationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
index 4e33744094..b86e43e6fb 100644
--- a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
@@ -119,7 +119,7 @@ void CLArgMinMaxLayerKernel::configure(const ICLTensor *input, const ICLTensor *
configure(CLKernelLibrary::get().get_compile_context(), input, prev_output, output, axis, op);
}
-void CLArgMinMaxLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
+void CLArgMinMaxLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op));
diff --git a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
index 610d8e8f62..2182019a40 100644
--- a/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
@@ -89,7 +89,7 @@ void CLBatchConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i
configure(CLKernelLibrary::get().get_compile_context(), input, batch_offset, output);
}
-void CLBatchConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int batch_offset, ICLTensor *output)
+void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int batch_offset, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), batch_offset, output->info()));
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 8776541536..9db175d922 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -142,7 +142,7 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
configure(CLKernelLibrary::get().get_compile_context(), input, output, mean, var, beta, gamma, epsilon, act_info);
}
-void CLBatchNormalizationLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta,
+void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta,
const ICLTensor *gamma,
float epsilon, ActivationLayerInfo act_info)
{
diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
index fbdd04c424..f830161633 100644
--- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
@@ -88,7 +88,7 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const ICLTenso
configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, output);
}
-void CLBatchToSpaceLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
+void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), block_shape->info(), output->info()));
@@ -116,7 +116,7 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const int32_t
configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, output);
}
-void CLBatchToSpaceLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
+void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLBitwiseAndKernel.cpp b/src/core/CL/kernels/CLBitwiseAndKernel.cpp
index df23b90310..45622aa319 100644
--- a/src/core/CL/kernels/CLBitwiseAndKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseAndKernel.cpp
@@ -42,7 +42,7 @@ void CLBitwiseAndKernel::configure(const ICLTensor *input1, const ICLTensor *inp
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
}
-void CLBitwiseAndKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLBitwiseAndKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLBitwiseNotKernel.cpp b/src/core/CL/kernels/CLBitwiseNotKernel.cpp
index 2abfa46301..0ad20a1897 100644
--- a/src/core/CL/kernels/CLBitwiseNotKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseNotKernel.cpp
@@ -36,7 +36,7 @@ void CLBitwiseNotKernel::configure(const ICLTensor *input, ICLTensor *output)
configure(CLKernelLibrary::get().get_compile_context(), input, output);
}
-void CLBitwiseNotKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLBitwiseNotKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLBitwiseOrKernel.cpp b/src/core/CL/kernels/CLBitwiseOrKernel.cpp
index 8ab509ae7f..a911dd9e01 100644
--- a/src/core/CL/kernels/CLBitwiseOrKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseOrKernel.cpp
@@ -43,7 +43,7 @@ void CLBitwiseOrKernel::configure(const ICLTensor *input1, const ICLTensor *inpu
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
}
-void CLBitwiseOrKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLBitwiseOrKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLBitwiseXorKernel.cpp b/src/core/CL/kernels/CLBitwiseXorKernel.cpp
index c3ff7de820..084991a202 100644
--- a/src/core/CL/kernels/CLBitwiseXorKernel.cpp
+++ b/src/core/CL/kernels/CLBitwiseXorKernel.cpp
@@ -43,7 +43,7 @@ void CLBitwiseXorKernel::configure(const ICLTensor *input1, const ICLTensor *inp
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
}
-void CLBitwiseXorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLBitwiseXorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
index 5ed5523632..55c6f8453b 100644
--- a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
+++ b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp
@@ -93,7 +93,7 @@ void CLBoundingBoxTransformKernel::configure(const ICLTensor *boxes, ICLTensor *
configure(CLKernelLibrary::get().get_compile_context(), boxes, pred_boxes, deltas, info);
}
-void CLBoundingBoxTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info)
+void CLBoundingBoxTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(boxes, pred_boxes, deltas);
auto_init_if_empty(*pred_boxes->info(), deltas->info()->clone()->set_data_type(boxes->info()->data_type()).set_quantization_info(boxes->info()->quantization_info()));
diff --git a/src/core/CL/kernels/CLBox3x3Kernel.cpp b/src/core/CL/kernels/CLBox3x3Kernel.cpp
index e0979a8aa3..d665845ed7 100644
--- a/src/core/CL/kernels/CLBox3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLBox3x3Kernel.cpp
@@ -45,7 +45,7 @@ void CLBox3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, bool b
configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
}
-void CLBox3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+void CLBox3x3Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLCannyEdgeKernel.cpp b/src/core/CL/kernels/CLCannyEdgeKernel.cpp
index c1aa611566..95b03970b0 100644
--- a/src/core/CL/kernels/CLCannyEdgeKernel.cpp
+++ b/src/core/CL/kernels/CLCannyEdgeKernel.cpp
@@ -43,7 +43,7 @@ void CLGradientKernel::configure(const ICLTensor *gx, const ICLTensor *gy, ICLTe
configure(CLKernelLibrary::get().get_compile_context(), gx, gy, magnitude, phase, norm_type);
}
-void CLGradientKernel::configure(CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type)
+void CLGradientKernel::configure(const CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gx, 1, DataType::S16, DataType::S32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gy, 1, DataType::S16, DataType::S32);
@@ -128,7 +128,7 @@ void CLEdgeNonMaxSuppressionKernel::configure(const ICLTensor *magnitude, const
configure(CLKernelLibrary::get().get_compile_context(), magnitude, phase, output, lower_thr, border_undefined);
}
-void CLEdgeNonMaxSuppressionKernel::configure(CLCompileContext &compile_context, const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined)
+void CLEdgeNonMaxSuppressionKernel::configure(const CLCompileContext &compile_context, const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(magnitude, 1, DataType::U16, DataType::U32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(phase, 1, DataType::U8);
@@ -208,7 +208,7 @@ void CLEdgeTraceKernel::configure(const ICLTensor *input, ICLTensor *output, int
configure(CLKernelLibrary::get().get_compile_context(), input, output, upper_thr, lower_thr, visited, recorded, l1_stack, l1_stack_counter);
}
-void CLEdgeTraceKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
+void CLEdgeTraceKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
ICLTensor *visited, ICLTensor *recorded, ICLTensor *l1_stack, ICLTensor *l1_stack_counter)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16, DataType::U32);
diff --git a/src/core/CL/kernels/CLChannelCombineKernel.cpp b/src/core/CL/kernels/CLChannelCombineKernel.cpp
index 90face2ccc..017d98f860 100644
--- a/src/core/CL/kernels/CLChannelCombineKernel.cpp
+++ b/src/core/CL/kernels/CLChannelCombineKernel.cpp
@@ -56,7 +56,7 @@ void CLChannelCombineKernel::configure(const ICLTensor *plane0, const ICLTensor
configure(CLKernelLibrary::get().get_compile_context(), plane0, plane1, plane2, plane3, output);
}
-void CLChannelCombineKernel::configure(CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output)
+void CLChannelCombineKernel::configure(const CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(plane0, plane1, plane2, output);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane0);
@@ -144,7 +144,7 @@ void CLChannelCombineKernel::configure(const ICLImage *plane0, const ICLImage *p
configure(CLKernelLibrary::get().get_compile_context(), plane0, plane1, plane2, output);
}
-void CLChannelCombineKernel::configure(CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output)
+void CLChannelCombineKernel::configure(const CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(plane0, plane1, plane2, output);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane0);
diff --git a/src/core/CL/kernels/CLChannelExtractKernel.cpp b/src/core/CL/kernels/CLChannelExtractKernel.cpp
index 8df162c4ee..669d6c52ad 100644
--- a/src/core/CL/kernels/CLChannelExtractKernel.cpp
+++ b/src/core/CL/kernels/CLChannelExtractKernel.cpp
@@ -52,7 +52,7 @@ void CLChannelExtractKernel::configure(const ICLTensor *input, Channel channel,
configure(CLKernelLibrary::get().get_compile_context(), input, channel, output);
}
-void CLChannelExtractKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output)
+void CLChannelExtractKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_ON(input == output);
@@ -114,7 +114,7 @@ void CLChannelExtractKernel::configure(const ICLMultiImage *input, Channel chann
configure(CLKernelLibrary::get().get_compile_context(), input, channel, output);
}
-void CLChannelExtractKernel::configure(CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output)
+void CLChannelExtractKernel::configure(const CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
diff --git a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
index 5e6bbb395b..d4eafec4c4 100644
--- a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
+++ b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp
@@ -95,7 +95,7 @@ void CLChannelShuffleLayerKernel::configure(const ICLTensor *input, ICLTensor *o
configure(CLKernelLibrary::get().get_compile_context(), input, output, num_groups);
}
-void CLChannelShuffleLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups)
+void CLChannelShuffleLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLCol2ImKernel.cpp b/src/core/CL/kernels/CLCol2ImKernel.cpp
index d96ec96126..5adb9ef60d 100644
--- a/src/core/CL/kernels/CLCol2ImKernel.cpp
+++ b/src/core/CL/kernels/CLCol2ImKernel.cpp
@@ -94,7 +94,7 @@ void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, const
configure(CLKernelLibrary::get().get_compile_context(), input, output, convolved_dims, num_groups);
}
-void CLCol2ImKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups)
+void CLCol2ImKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLColorConvertKernel.cpp b/src/core/CL/kernels/CLColorConvertKernel.cpp
index 720d925427..e9612f3ea6 100644
--- a/src/core/CL/kernels/CLColorConvertKernel.cpp
+++ b/src/core/CL/kernels/CLColorConvertKernel.cpp
@@ -51,7 +51,7 @@ void CLColorConvertKernel::configure(const ICLTensor *input, ICLTensor *output)
configure(CLKernelLibrary::get().get_compile_context(), input, output);
}
-void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLColorConvertKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON(input == nullptr);
ARM_COMPUTE_ERROR_ON(output == nullptr);
@@ -147,7 +147,7 @@ void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLImage *outpu
configure(CLKernelLibrary::get().get_compile_context(), input, output);
}
-void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output)
+void CLColorConvertKernel::configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output)
{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
ARM_COMPUTE_ERROR_ON(output == nullptr);
@@ -237,7 +237,7 @@ void CLColorConvertKernel::configure(const ICLImage *input, ICLMultiImage *outpu
configure(CLKernelLibrary::get().get_compile_context(), input, output);
}
-void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output)
+void CLColorConvertKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output)
{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON(output == nullptr);
@@ -349,7 +349,7 @@ void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLMultiImage *
configure(CLKernelLibrary::get().get_compile_context(), input, output);
}
-void CLColorConvertKernel::configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output)
+void CLColorConvertKernel::configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output)
{
unsigned int num_elems_processed_per_iteration = 0;
switch(input->info()->format())
diff --git a/src/core/CL/kernels/CLComparisonKernel.cpp b/src/core/CL/kernels/CLComparisonKernel.cpp
index 61aeebea5a..216190752c 100644
--- a/src/core/CL/kernels/CLComparisonKernel.cpp
+++ b/src/core/CL/kernels/CLComparisonKernel.cpp
@@ -111,7 +111,7 @@ void CLComparisonKernel::configure(const ICLTensor *input1, const ICLTensor *inp
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, operation);
}
-void CLComparisonKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation)
+void CLComparisonKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info(), operation));
diff --git a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
index f57ff6c07e..9670fae36a 100644
--- a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
+++ b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
@@ -44,7 +44,7 @@ void CLConvertFullyConnectedWeightsKernel::configure(const ICLTensor *input, ICL
configure(CLKernelLibrary::get().get_compile_context(), input, output, original_input_shape, data_layout);
}
-void CLConvertFullyConnectedWeightsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
+void CLConvertFullyConnectedWeightsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
DataLayout data_layout)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLConvolutionKernel.cpp b/src/core/CL/kernels/CLConvolutionKernel.cpp
index 3cc6d24de2..2476180ba4 100644
--- a/src/core/CL/kernels/CLConvolutionKernel.cpp
+++ b/src/core/CL/kernels/CLConvolutionKernel.cpp
@@ -64,7 +64,7 @@ void CLConvolutionKernel<matrix_size>::configure(const ICLTensor *input, ICLTens
}
template <unsigned int matrix_size>
-void CLConvolutionKernel<matrix_size>::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined)
+void CLConvolutionKernel<matrix_size>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
@@ -140,7 +140,7 @@ void CLSeparableConvolutionHorKernel<matrix_size>::configure(const ICLTensor *in
}
template <unsigned int matrix_size>
-void CLSeparableConvolutionHorKernel<matrix_size>::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined)
+void CLSeparableConvolutionHorKernel<matrix_size>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16, DataType::S16, DataType::S32);
@@ -216,7 +216,7 @@ void CLSeparableConvolutionVertKernel<matrix_size>::configure(const ICLTensor *i
}
template <unsigned int matrix_size>
-void CLSeparableConvolutionVertKernel<matrix_size>::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
+void CLSeparableConvolutionVertKernel<matrix_size>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
const int16_t *conv, uint32_t scale, bool border_undefined, DataType data_type)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16, DataType::S16, DataType::S32);
@@ -303,7 +303,7 @@ void CLConvolutionRectangleKernel::configure(const ICLTensor *input, ICLTensor *
configure(CLKernelLibrary::get().get_compile_context(), input, output, conv, width, height, scale, border_undefined);
}
-void CLConvolutionRectangleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale,
+void CLConvolutionRectangleKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale,
bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLCopyKernel.cpp b/src/core/CL/kernels/CLCopyKernel.cpp
index e59223e511..a864502a5f 100644
--- a/src/core/CL/kernels/CLCopyKernel.cpp
+++ b/src/core/CL/kernels/CLCopyKernel.cpp
@@ -160,7 +160,7 @@ void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const Pa
configure(CLKernelLibrary::get().get_compile_context(), input, output, padding, output_window);
}
-void CLCopyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, Window *output_window)
+void CLCopyKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, Window *output_window)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, output_window));
diff --git a/src/core/CL/kernels/CLCropKernel.cpp b/src/core/CL/kernels/CLCropKernel.cpp
index 2c17c99559..eb1ab7aebb 100644
--- a/src/core/CL/kernels/CLCropKernel.cpp
+++ b/src/core/CL/kernels/CLCropKernel.cpp
@@ -52,8 +52,8 @@ void CLCropKernel::configure(const ICLTensor *input, ICLTensor *output, Coordina
configure(CLKernelLibrary::get().get_compile_context(), input, output, start, end, batch_index, extrapolation_value, output_window);
}
-void CLCropKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value,
- Window *output_window)
+void CLCropKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index,
+ float extrapolation_value, Window *output_window)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), start, end, batch_index, extrapolation_value, output_window));
diff --git a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
index f92f7da37f..a368fae43b 100644
--- a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
+++ b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp
@@ -73,7 +73,7 @@ void CLDeconvolutionLayerUpsampleKernel::configure(const ICLTensor *input, ICLTe
configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
}
-void CLDeconvolutionLayerUpsampleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
+void CLDeconvolutionLayerUpsampleKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
const PadStrideInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
index 68607e9fc6..7a4b7df5e2 100644
--- a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
+++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
@@ -119,7 +119,7 @@ void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input, const
configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, input_info, weights_info, deconv_info);
}
-void CLDeconvolutionReshapeOutputKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info,
+void CLDeconvolutionReshapeOutputKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info,
const ITensorInfo *weights_info,
const PadStrideInfo &deconv_info)
{
diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 241adb297b..65b603602c 100644
--- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -87,7 +87,7 @@ void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i
configure(CLKernelLibrary::get().get_compile_context(), input, depth_offset, output);
}
-void CLDepthConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int depth_offset, ICLTensor *output)
+void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int depth_offset, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), depth_offset, output->info()));
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
index 2e29dbf92a..868d4efc51 100644
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
@@ -77,7 +77,7 @@ void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *out
configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift);
}
-void CLDepthConvertLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
+void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
index cd61a91ec5..ffd31552f1 100644
--- a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp
@@ -70,7 +70,7 @@ void CLDepthToSpaceLayerKernel::configure(const ICLTensor *input, ICLTensor *out
configure(CLKernelLibrary::get().get_compile_context(), input, output, block_shape);
}
-void CLDepthToSpaceLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
+void CLDepthToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
index e293fa264f..936cdd849c 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
@@ -249,7 +249,7 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input,
configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts);
}
-void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
index 71af63a97f..fe72260e3b 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
@@ -202,7 +202,7 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts);
}
-void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
index 45df9ed59a..e6c9861c4a 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp
@@ -199,7 +199,7 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const ICLTensor *input,
configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation, output_multipliers, output_shifts);
}
-void CLDepthwiseConvolutionLayerNativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+void CLDepthwiseConvolutionLayerNativeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
const DWCWeightsKernelInfo &dwc_weights_info,
const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation,
const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
index 7e38e77107..d284203b76 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
@@ -91,7 +91,7 @@ void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const ICLTensor
configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
}
-void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
+void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), info));
diff --git a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp b/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
index ae7489f0a8..ec9b5cb8e2 100644
--- a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
@@ -80,7 +80,7 @@ void CLDequantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *o
configure(CLKernelLibrary::get().get_compile_context(), input, output);
}
-void CLDequantizationLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLDequantizationLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
diff --git a/src/core/CL/kernels/CLDerivativeKernel.cpp b/src/core/CL/kernels/CLDerivativeKernel.cpp
index 670cde308c..595ff9bdee 100644
--- a/src/core/CL/kernels/CLDerivativeKernel.cpp
+++ b/src/core/CL/kernels/CLDerivativeKernel.cpp
@@ -53,7 +53,7 @@ void CLDerivativeKernel::configure(const ICLTensor *input, ICLTensor *output_x,
configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined);
}
-void CLDerivativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
+void CLDerivativeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
diff --git a/src/core/CL/kernels/CLDilateKernel.cpp b/src/core/CL/kernels/CLDilateKernel.cpp
index 0f6879dcc8..7cba97f839 100644
--- a/src/core/CL/kernels/CLDilateKernel.cpp
+++ b/src/core/CL/kernels/CLDilateKernel.cpp
@@ -41,7 +41,7 @@ void CLDilateKernel::configure(const ICLTensor *input, ICLTensor *output, bool b
configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
}
-void CLDilateKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+void CLDilateKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index ff3d106f46..2fcc82fbd3 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -426,7 +426,7 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info);
}
-void CLDirectConvolutionLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
diff --git a/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp b/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp
index 7356c5a5cd..5c74579184 100644
--- a/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp
+++ b/src/core/CL/kernels/CLElementWiseUnaryLayerKernel.cpp
@@ -55,7 +55,7 @@ void CLElementWiseUnaryLayerKernel::configure(const ICLTensor *input, ICLTensor
configure(CLKernelLibrary::get().get_compile_context(), input, output, op);
}
-void CLElementWiseUnaryLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op)
+void CLElementWiseUnaryLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input->info(), *output->info()));
diff --git a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
index ee4ef40b87..00a97d50e9 100644
--- a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
+++ b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
@@ -240,7 +240,7 @@ void CLElementwiseOperationKernel::configure_common(const ICLTensor *input1, con
configure_common(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
}
-void CLElementwiseOperationKernel::configure_common(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLElementwiseOperationKernel::configure_common(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info()));
@@ -337,7 +337,7 @@ void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, con
configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, policy, act_info);
}
-void CLSaturatedArithmeticOperationKernel::configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
+void CLSaturatedArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
const ConvertPolicy &policy,
const ActivationLayerInfo &act_info)
{
@@ -396,7 +396,7 @@ void CLArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTen
configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, act_info);
}
-void CLArithmeticOperationKernel::configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
+void CLArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
const ActivationLayerInfo &act_info)
{
_op = op;
diff --git a/src/core/CL/kernels/CLErodeKernel.cpp b/src/core/CL/kernels/CLErodeKernel.cpp
index e959d1c320..6cb5ffc8cc 100644
--- a/src/core/CL/kernels/CLErodeKernel.cpp
+++ b/src/core/CL/kernels/CLErodeKernel.cpp
@@ -41,7 +41,7 @@ void CLErodeKernel::configure(const ICLTensor *input, ICLTensor *output, bool bo
configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
}
-void CLErodeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+void CLErodeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
index 5542ad72f4..03e6ee7dfb 100644
--- a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
+++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp
@@ -78,7 +78,7 @@ void CLFFTDigitReverseKernel::configure(const ICLTensor *input, ICLTensor *outpu
configure(CLKernelLibrary::get().get_compile_context(), input, output, idx, config);
}
-void CLFFTDigitReverseKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config)
+void CLFFTDigitReverseKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, idx);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), idx->info(), config));
diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
index 6e7e1eff06..63c093958c 100644
--- a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
+++ b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp
@@ -88,7 +88,7 @@ void CLFFTRadixStageKernel::configure(ICLTensor *input, ICLTensor *output, const
configure(CLKernelLibrary::get().get_compile_context(), input, output, config);
}
-void CLFFTRadixStageKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config)
+void CLFFTRadixStageKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, config));
diff --git a/src/core/CL/kernels/CLFFTScaleKernel.cpp b/src/core/CL/kernels/CLFFTScaleKernel.cpp
index 32e652ad1d..4738a12b49 100644
--- a/src/core/CL/kernels/CLFFTScaleKernel.cpp
+++ b/src/core/CL/kernels/CLFFTScaleKernel.cpp
@@ -81,7 +81,7 @@ void CLFFTScaleKernel::configure(ICLTensor *input, ICLTensor *output, const FFTS
configure(CLKernelLibrary::get().get_compile_context(), input, output, config);
}
-void CLFFTScaleKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config)
+void CLFFTScaleKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr));
diff --git a/src/core/CL/kernels/CLFastCornersKernel.cpp b/src/core/CL/kernels/CLFastCornersKernel.cpp
index 2bd4d89bbc..4c2086c1c6 100644
--- a/src/core/CL/kernels/CLFastCornersKernel.cpp
+++ b/src/core/CL/kernels/CLFastCornersKernel.cpp
@@ -53,7 +53,7 @@ void CLFastCornersKernel::configure(const ICLImage *input, ICLImage *output, flo
configure(CLKernelLibrary::get().get_compile_context(), input, output, threshold, non_max_suppression, border_mode);
}
-void CLFastCornersKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode)
+void CLFastCornersKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode)
{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
@@ -141,7 +141,7 @@ void CLCopyToArrayKernel::configure(const ICLImage *input, bool update_number, I
configure(CLKernelLibrary::get().get_compile_context(), input, update_number, corners, num_buffers);
}
-void CLCopyToArrayKernel::configure(CLCompileContext &compile_context, const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers)
+void CLCopyToArrayKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers)
{
ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLFillBorderKernel.cpp b/src/core/CL/kernels/CLFillBorderKernel.cpp
index c69a8c9f92..b2f06b34bb 100644
--- a/src/core/CL/kernels/CLFillBorderKernel.cpp
+++ b/src/core/CL/kernels/CLFillBorderKernel.cpp
@@ -65,7 +65,7 @@ void CLFillBorderKernel::configure(ICLTensor *tensor, BorderSize border_size, Bo
configure(CLKernelLibrary::get().get_compile_context(), tensor, border_size, border_mode, constant_border_value);
}
-void CLFillBorderKernel::configure(CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
+void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value)
{
ARM_COMPUTE_ERROR_ON(tensor == nullptr);
ARM_COMPUTE_ERROR_ON(tensor->info()->num_channels() != 1);
diff --git a/src/core/CL/kernels/CLFlattenLayerKernel.cpp b/src/core/CL/kernels/CLFlattenLayerKernel.cpp
index c2dc933f5a..d7b4a6eade 100644
--- a/src/core/CL/kernels/CLFlattenLayerKernel.cpp
+++ b/src/core/CL/kernels/CLFlattenLayerKernel.cpp
@@ -85,7 +85,7 @@ void CLFlattenLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
configure(CLKernelLibrary::get().get_compile_context(), input, output);
}
-void CLFlattenLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLFlattenLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
diff --git a/src/core/CL/kernels/CLFloorKernel.cpp b/src/core/CL/kernels/CLFloorKernel.cpp
index abfed8d18e..9b2133d796 100644
--- a/src/core/CL/kernels/CLFloorKernel.cpp
+++ b/src/core/CL/kernels/CLFloorKernel.cpp
@@ -77,7 +77,7 @@ CLFloorKernel::CLFloorKernel()
{
}
-void CLFloorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLFloorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
index 6f4ba0ed06..fd03e8379c 100644
--- a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
+++ b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp
@@ -112,7 +112,7 @@ void CLFuseBatchNormalizationKernel::configure(const ICLTensor *input_weights, c
configure(CLKernelLibrary::get().get_compile_context(), input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_beta, bn_gamma, epsilon, fbn_type);
}
-void CLFuseBatchNormalizationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var,
+void CLFuseBatchNormalizationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var,
ICLTensor *fused_weights, ICLTensor *fused_bias,
const ICLTensor *input_bias, const ICLTensor *bn_beta, const ICLTensor *bn_gamma,
float epsilon, FuseBatchNormalizationType fbn_type)