author    Manuel Bottini <manuel.bottini@arm.com>    2020-04-21 13:29:30 +0100
committer Manuel Bottini <manuel.bottini@arm.com>    2020-04-22 08:57:15 +0000
commit    256c0b9d5d66d35d52c7eee3599a4d91e7887ec8 (patch)
tree      6bb2bd6648ed3a7292d63c731a679bb7cb2cf3d3 /arm_compute/core/CL
parent    7ba240bb54bdbd50f1809f944dde2fac7005cc10 (diff)
download  ComputeLibrary-256c0b9d5d66d35d52c7eee3599a4d91e7887ec8.tar.gz
COMPMID-3280: Make all ML primitives for CL use the new interface - Part1 - Fix1

- const fix in the CLKernels part 1

Change-Id: I17340cb6ff26afd52b14b46645efedbe07aef1b6
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3067
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
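For readers of the header changes below, here is a minimal, hypothetical usage sketch (not part of this patch) showing a caller passing the compile context by const reference to one of the updated kernels. It assumes the shared context is obtainable via CLKernelLibrary::get().get_compile_context(), which may vary by library version:

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

void run_relu_example()
{
    // Initialise the default OpenCL context and queue for the CL backend.
    CLScheduler::get().default_init();

    // Assumed accessor for the shared compile context; the exact retrieval may
    // differ by library version. The kernel now accepts it by const reference.
    const CLCompileContext &compile_context = CLKernelLibrary::get().get_compile_context();

    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // Configure against tensor metadata using the new const-qualified overload.
    CLActivationLayerKernel act_kernel;
    act_kernel.configure(compile_context, &src, &dst,
                         ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    // Allocate the backing CL buffers and run the kernel.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    CLScheduler::get().enqueue(act_kernel);
    CLScheduler::get().sync();
}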
Diffstat (limited to 'arm_compute/core/CL')
-rw-r--r-- arm_compute/core/CL/CLHelpers.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLAccumulateKernel.h | 6
-rw-r--r-- arm_compute/core/CL/kernels/CLActivationLayerKernel.h | 8
-rw-r--r-- arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h | 5
-rw-r--r-- arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h | 4
-rw-r--r-- arm_compute/core/CL/kernels/CLBitwiseAndKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLBitwiseNotKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLBitwiseOrKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLBitwiseXorKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLBox3x3Kernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLCannyEdgeKernel.h | 6
-rw-r--r-- arm_compute/core/CL/kernels/CLChannelCombineKernel.h | 4
-rw-r--r-- arm_compute/core/CL/kernels/CLChannelExtractKernel.h | 4
-rw-r--r-- arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLCol2ImKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLColorConvertKernel.h | 8
-rw-r--r-- arm_compute/core/CL/kernels/CLComparisonKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLConvolutionKernel.h | 8
-rw-r--r-- arm_compute/core/CL/kernels/CLCopyKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLCropKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDerivativeKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDilateKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h | 6
-rw-r--r-- arm_compute/core/CL/kernels/CLErodeKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLFFTScaleKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLFastCornersKernel.h | 4
-rw-r--r-- arm_compute/core/CL/kernels/CLFillBorderKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLFlattenLayerKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLFloorKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h | 2
-rw-r--r-- arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h | 2
50 files changed, 70 insertions, 71 deletions
diff --git a/arm_compute/core/CL/CLHelpers.h b/arm_compute/core/CL/CLHelpers.h
index 77c17c7d9c..c5db66c664 100644
--- a/arm_compute/core/CL/CLHelpers.h
+++ b/arm_compute/core/CL/CLHelpers.h
@@ -206,7 +206,7 @@ cl::Kernel create_opencl_kernel(CLCoreRuntimeContext *ctx, const std::string &ke
*
* @return An opencl kernel
*/
-cl::Kernel create_kernel(CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts = std::set<std::string>());
+cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set<std::string> &build_opts = std::set<std::string>());
/** Creates a suitable LWS hint object for parallel implementations. Sets the number of WG based on the input size.
* If input width is smaller than 128 we can use fewer threads than 8.
diff --git a/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h b/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h
index 18896725e2..58dea3bdae 100644
--- a/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h
+++ b/arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8/S16.
* @param[out] output Destination tensor. Data types supported: U8/S16.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLAccumulateKernel.h b/arm_compute/core/CL/kernels/CLAccumulateKernel.h
index d7cb09fdd3..f639148e25 100644
--- a/arm_compute/core/CL/kernels/CLAccumulateKernel.h
+++ b/arm_compute/core/CL/kernels/CLAccumulateKernel.h
@@ -52,7 +52,7 @@ public:
* @param[in] input Source tensor. Data types supported: U8.
* @param[out] accum Destination tensor. Data types supported: S16.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum);
};
/** Interface for the accumulate weighted kernel.
@@ -81,7 +81,7 @@ public:
* @param[in] alpha Scalar value in the range [0, 1.0]. Data types supported: F32.
* @param[in,out] accum Accumulated tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum);
};
/** Interface for the accumulate squared kernel.
@@ -108,7 +108,7 @@ public:
* @param[in] shift Shift value in the range of [0, 15]. Data types supported: U32.
* @param[in,out] accum Accumulated tensor. Data types supported: S16.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum);
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLACCUMULATEKERNEL_H */
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index d25480cd60..1e83a689cd 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -65,7 +65,7 @@ public:
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayerKernel
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
@@ -81,9 +81,9 @@ public:
void run(const Window &window, cl::CommandQueue &queue) override;
private:
- ICLTensor *_input;
- ICLTensor *_output;
- bool _run_in_place;
+ ICLTensor *_input;
+ ICLTensor *_output;
+ bool _run_in_place;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLACTIVATIONLAYERKERNEL_H */
diff --git a/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h b/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
index 831cee5e58..94e8baed13 100644
--- a/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
@@ -76,7 +76,7 @@ public:
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
* @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op);
/** Static function to check if given info will lead to a valid configuration of @ref CLArgMinMaxLayerKernel.
*
diff --git a/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h
index 06764302f4..163666853c 100644
--- a/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h
@@ -72,7 +72,7 @@ public:
* @note: The gaps between the two lowest dimensions of input and output need to be divisible by 2.
*
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int batch_offset, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int batch_offset, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchConcatenateLayerKernel
*
* @param[in] input Input tensor info. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 564b21680b..8eaaca845a 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -81,9 +81,8 @@ public:
* @param[in] epsilon (Optional) Small value to avoid division with zero. Default value is 0.001f.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr, const ICLTensor *gamma = nullptr,
- float epsilon = 0.001f,
- ActivationLayerInfo act_info = ActivationLayerInfo());
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr,
+ const ICLTensor *gamma = nullptr, float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayerKernel
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
diff --git a/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h
index f9289eab73..2b12ad094a 100644
--- a/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h
@@ -61,7 +61,7 @@ public:
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
/** Initialise the kernel's inputs and output (Static block shape).
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -78,7 +78,7 @@ public:
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchToSpaceLayerKernel
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h b/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h
index 6c60bc0f33..8defe32862 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseAndKernel.h
@@ -62,7 +62,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h b/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h
index 0522841e73..b86ce7f173 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseNotKernel.h
@@ -50,7 +50,7 @@ public:
* @param[in] input Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLBITWISENOTKERNEL_H */
diff --git a/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h b/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h
index 151f19d374..65eb50f0fd 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseOrKernel.h
@@ -62,7 +62,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h b/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h
index 03c1e05da4..5c63a7f22c 100644
--- a/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h
+++ b/arm_compute/core/CL/kernels/CLBitwiseXorKernel.h
@@ -62,7 +62,7 @@ public:
* @param[in] input2 Source tensor. Data types supported: U8.
* @param[out] output Destination tensor. Data types supported: U8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h b/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h
index ffa63bd5a4..bbe11562ed 100644
--- a/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h
+++ b/arm_compute/core/CL/kernels/CLBoundingBoxTransformKernel.h
@@ -71,7 +71,7 @@ public:
* @note Only single image prediction is supported. Height and Width (and scale) of the image will be contained in the BoundingBoxTransformInfo struct.
*
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLBoundingBoxTransform
*
diff --git a/arm_compute/core/CL/kernels/CLBox3x3Kernel.h b/arm_compute/core/CL/kernels/CLBox3x3Kernel.h
index 572ae87d9a..ea3c1c1f3e 100644
--- a/arm_compute/core/CL/kernels/CLBox3x3Kernel.h
+++ b/arm_compute/core/CL/kernels/CLBox3x3Kernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output The output tensor. Data types supported: U8.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
//Inherited methods overriden:
BorderSize border_size() const override;
diff --git a/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h b/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h
index 67c23dd811..40ad4dcd84 100644
--- a/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h
+++ b/arm_compute/core/CL/kernels/CLCannyEdgeKernel.h
@@ -65,7 +65,7 @@ public:
* @param[out] phase Destination tensor - Quantized phase. Data types supported: U8.
* @param[in] norm_type Normalization type. if 1, L1-Norm otherwise L2-Norm.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, int32_t norm_type);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -111,7 +111,7 @@ public:
* @param[in] lower_thr Lower threshold.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *magnitude, const ICLTensor *phase, ICLTensor *output, int32_t lower_thr, bool border_undefined);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -167,7 +167,7 @@ public:
* @param[in,out] l1_stack_counter Tensor for counting the elements in the L1 stack of each pixel. Data types supported: U8.
* Expected to be initialized to 0 before each run.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr,
ICLTensor *visited, ICLTensor *recorded, ICLTensor *l1_stack, ICLTensor *l1_stack_counter);
// Inherited methods overridden:
diff --git a/arm_compute/core/CL/kernels/CLChannelCombineKernel.h b/arm_compute/core/CL/kernels/CLChannelCombineKernel.h
index 60d0bd4a45..32ddf152c3 100644
--- a/arm_compute/core/CL/kernels/CLChannelCombineKernel.h
+++ b/arm_compute/core/CL/kernels/CLChannelCombineKernel.h
@@ -69,7 +69,7 @@ public:
* @param[in] plane3 The 2D plane that forms channel 3. Must be of U8 format.
* @param[out] output The single planar output tensor.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output);
/** Configure function's inputs and outputs.
*
* @param[in] plane0 The 2D plane that forms channel 0. Must be of U8 format.
@@ -86,7 +86,7 @@ public:
* @param[in] plane2 The 2D plane that forms channel 2. Must be of U8 format.
* @param[out] output The multi planar output tensor.
*/
- void configure(CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLChannelExtractKernel.h b/arm_compute/core/CL/kernels/CLChannelExtractKernel.h
index 1f2cc8900a..6a0c4bb94e 100644
--- a/arm_compute/core/CL/kernels/CLChannelExtractKernel.h
+++ b/arm_compute/core/CL/kernels/CLChannelExtractKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] channel Channel to extract.
* @param[out] output Destination tensor. Must be of U8 format.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output);
/** Set the input and output of the kernel
*
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV/YUV444
@@ -80,7 +80,7 @@ public:
* @param[in] channel Channel to extract.
* @param[out] output Single-planar 2D destination image. Must be of U8 format.
*/
- void configure(CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h b/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
index 921c20df10..14b59d325f 100644
--- a/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
@@ -60,7 +60,7 @@ public:
* @param[out] output Output tensor. Data type supported: Same as @p input
* @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups);
/** Static function to check if given info will lead to a valid configuration of @ref CLChannelShuffleLayerKernel
*
* @param[in] input Input tensor info. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLCol2ImKernel.h b/arm_compute/core/CL/kernels/CLCol2ImKernel.h
index 6ef424853e..d0528ed21a 100644
--- a/arm_compute/core/CL/kernels/CLCol2ImKernel.h
+++ b/arm_compute/core/CL/kernels/CLCol2ImKernel.h
@@ -81,7 +81,7 @@ public:
* @param[in] convolved_dims Output convolved dimensions.
* @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups = 1);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &convolved_dims, unsigned int num_groups = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLCol2ImKernel
*
* @param[in] input The input tensor to convert. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
diff --git a/arm_compute/core/CL/kernels/CLColorConvertKernel.h b/arm_compute/core/CL/kernels/CLColorConvertKernel.h
index 25b95eb42c..2bcd141863 100644
--- a/arm_compute/core/CL/kernels/CLColorConvertKernel.h
+++ b/arm_compute/core/CL/kernels/CLColorConvertKernel.h
@@ -67,7 +67,7 @@ public:
* RGBA8888 (if the formats of @p input are UYVY422/YUYV422/RGB888/),
* U8 (if the formats of @p input is RGB888)
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Set the input and output of the kernel
*
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
@@ -80,7 +80,7 @@ public:
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
* @param[out] output Single-planar destination image. Formats supported: RGB888/RGBA8888
*/
- void configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output);
/** Set the input and output of the kernel
*
* @param[in] input Single-planar source image. Formats supported: RGB888/RGBA8888/UYVY422/YUYV422
@@ -93,7 +93,7 @@ public:
* @param[in] input Single-planar source image. Formats supported: RGB888/RGBA8888/UYVY422/YUYV422
* @param[out] output Multi-planar destination image. Formats supported: NV12/IYUV/YUV444 (if the formats of @p input are RGB888/RGB8888)
*/
- void configure(CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output);
/** Set the input and output of the kernel
*
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
@@ -106,7 +106,7 @@ public:
* @param[in] input Multi-planar source image. Formats supported: NV12/NV21/IYUV
* @param[out] output Multi-planar destination image. Formats supported: YUV444/IYUV (if the formats of @p input are NV12/NV21)/NV12 (if the format of @p input is IYUV)
*/
- void configure(CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output);
+ void configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLComparisonKernel.h b/arm_compute/core/CL/kernels/CLComparisonKernel.h
index 15779938b2..d5c5297c61 100644
--- a/arm_compute/core/CL/kernels/CLComparisonKernel.h
+++ b/arm_compute/core/CL/kernels/CLComparisonKernel.h
@@ -64,7 +64,7 @@ public:
* @param[out] output Destination tensor. Data types supported: U8.
* @param[in] operation Comparison operation to use.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ComparisonOperation operation);
/** Static function to check if given info will lead to a valid configuration of @ref CLComparisonKernel
*
* @param[in] input1 Source tensor. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
index f7e212e1e4..d3e57a6738 100644
--- a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
@@ -69,7 +69,7 @@ public:
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
* @param[in] data_layout The data layout the weights have been trained in.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref CLConvertFullyConnectedWeightsKernel
*
* @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLConvolutionKernel.h b/arm_compute/core/CL/kernels/CLConvolutionKernel.h
index e1cdc88007..b6fe51dbaa 100644
--- a/arm_compute/core/CL/kernels/CLConvolutionKernel.h
+++ b/arm_compute/core/CL/kernels/CLConvolutionKernel.h
@@ -70,7 +70,7 @@ public:
* @param[in] scale Scale of the convolution matrix. If 0 is passed, it will be set to the sum of the coefficients of the convolution or 1 if they add up to 0.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
@@ -112,7 +112,7 @@ public:
* @param[in] conv Convolution matrix to apply to the input tensor.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
@@ -153,7 +153,7 @@ public:
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
* @param[in] data_type Data type to use for intermeidate result. @sa data_type_for_convolution
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined, DataType data_type = DataType::S32);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, bool border_undefined, DataType data_type = DataType::S32);
// Inherited methods overridden:
BorderSize border_size() const override;
@@ -209,7 +209,7 @@ public:
* @param[in] scale Scale of the convolution matrix. If 0 is passed, it will be set to the sum of the coefficients of the convolution or 1 if they add up to 0.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t width, uint32_t height, uint32_t scale, bool border_undefined);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLCopyKernel.h b/arm_compute/core/CL/kernels/CLCopyKernel.h
index 1774f8ccad..05dff8ed0c 100644
--- a/arm_compute/core/CL/kernels/CLCopyKernel.h
+++ b/arm_compute/core/CL/kernels/CLCopyKernel.h
@@ -61,7 +61,7 @@ public:
* @param[in] padding (Optional) Padding to be applied to the input tensor
* @param[in] output_window (Optional) Window to be used in case only copying into part of a tensor. Default is nullptr.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding = PaddingList(), Window *output_window = nullptr);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding = PaddingList(), Window *output_window = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLCopyKernel
*
* @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
diff --git a/arm_compute/core/CL/kernels/CLCropKernel.h b/arm_compute/core/CL/kernels/CLCropKernel.h
index 103986a5f8..a1c6f901eb 100644
--- a/arm_compute/core/CL/kernels/CLCropKernel.h
+++ b/arm_compute/core/CL/kernels/CLCropKernel.h
@@ -71,7 +71,7 @@ public:
* @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0.
* @param[in] output_window Output window to be used in case cropped image is being copied into a tensor. Default is nullptr.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0,
Window *output_window = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLStridedSliceKernel
diff --git a/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h b/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
index 7e8a45fd8f..0c65f519cc 100644
--- a/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
+++ b/arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h
@@ -62,7 +62,7 @@ public:
* @param[out] output Destination tensor. Data types supported: same as @p input. All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
* @param[in] info Contains padding and stride information described in @ref PadStrideInfo.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PadStrideInfo &info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PadStrideInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayerUpsample
*
* @param[in] input Source tensor info. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
index daeb8c1f9c..292c561e46 100644
--- a/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
+++ b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
@@ -79,7 +79,7 @@ public:
* @param[in] weights_info Deconvolution weights tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
* @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width && stride_y = weights.height. Moreover, padding is not supported.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
const PadStrideInfo &deconv_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionReshapeOutputKernel.
diff --git a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
index 7b594417d6..5fe826d090 100644
--- a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
@@ -72,7 +72,7 @@ public:
* @note: The gaps between the two lowest dimensions of input and output need to be divisible by 2.
*
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int depth_offset, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int depth_offset, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthConcatenateLayerKernel
*
* @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
diff --git a/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
index 8bbf9b3dce..66eb6222b2 100644
--- a/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
@@ -75,7 +75,7 @@ public:
* @param[in] policy Conversion policy
* @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthConvertLayerKernel
*
* @param[in] input Source tensor info. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
diff --git a/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h
index 541506b521..87ac3c1ec1 100644
--- a/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthToSpaceLayerKernel.h
@@ -61,7 +61,7 @@ public:
* @param[out] output Tensor output. Data types supported: same as @p input
* @param[in] block_shape Block shape value.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthToSpaceLayerKernel.
*
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
index f68fde4737..6cf0326467 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h
@@ -75,7 +75,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NCHWKernel
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
index f9fda0a42c..e564cf6fe0 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
@@ -76,7 +76,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
index db26b4a06f..8847cf9c46 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
@@ -87,7 +87,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
index e7fc6f8d81..8dc5d32e4f 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
@@ -59,7 +59,7 @@ public:
* @param[out] output The output tensor of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
* @param[in] info Depthwise convolution information to reshape the input tensor.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
*
diff --git a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
index 4cb1339300..bb154f1a5b 100644
--- a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
@@ -58,7 +58,7 @@ public:
* @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] output Destination tensor. Data types supported: F16/F32.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayerKernel
*
* @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
diff --git a/arm_compute/core/CL/kernels/CLDerivativeKernel.h b/arm_compute/core/CL/kernels/CLDerivativeKernel.h
index 5d5ad860f3..cd8ae90c2d 100644
--- a/arm_compute/core/CL/kernels/CLDerivativeKernel.h
+++ b/arm_compute/core/CL/kernels/CLDerivativeKernel.h
@@ -66,7 +66,7 @@ public:
* @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S16.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLDilateKernel.h b/arm_compute/core/CL/kernels/CLDilateKernel.h
index 9c41a84b31..45f5fe0764 100644
--- a/arm_compute/core/CL/kernels/CLDilateKernel.h
+++ b/arm_compute/core/CL/kernels/CLDilateKernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output The output tensor. Data types supported: U8.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
index f1409b6339..489d7c27c5 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
@@ -88,7 +88,7 @@ public:
* The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerKernel
*
* @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
diff --git a/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h b/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h
index 1f76992b96..e190bdebbe 100644
--- a/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
* @param[in] op Element wise unary operation to perform.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ElementWiseUnary &op);
/** Static function to check if given info will lead to a valid configuration of @ref CLElementWiseUnaryLayerKernel
*
* @param[in] input First tensor input info. Data types supported: F16/F32.
diff --git a/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
index 2f1060126a..4d3d4bc834 100644
--- a/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
+++ b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
@@ -99,7 +99,7 @@ protected:
/** Commmon configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff)
*
*/
- void configure_common(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+ void configure_common(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
ActivationLayerInfo _act_info;
@@ -138,7 +138,7 @@ public:
* @param[in] policy Policy to use to handle overflow.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy,
+ void configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy,
const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel
@@ -194,7 +194,7 @@ public:
* @param[in] output Output tensor. Data types supported: Same as @p input1.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
+ void configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel
diff --git a/arm_compute/core/CL/kernels/CLErodeKernel.h b/arm_compute/core/CL/kernels/CLErodeKernel.h
index 8ba6ff8408..cbc748194c 100644
--- a/arm_compute/core/CL/kernels/CLErodeKernel.h
+++ b/arm_compute/core/CL/kernels/CLErodeKernel.h
@@ -50,7 +50,7 @@ public:
* @param[out] output The output tensor. Data types supported: U8.
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined);
// Inherited methods overridden:
BorderSize border_size() const override;
diff --git a/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h b/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
index eac03ff868..a8da1246bb 100644
--- a/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTDigitReverseKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] idx Digit reverse index tensor. Data type supported: U32
* @param[in] config Kernel configuration.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *idx, const FFTDigitReverseKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTDigitReverseKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
diff --git a/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h b/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
index 85bf4cce66..e3f53462d9 100644
--- a/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTRadixStageKernel.h
@@ -69,7 +69,7 @@ public:
* @param[out] output Destination tensor. Can be nullptr. Data type supported: same as @p input
* @param[in] config FFT descriptor metadata.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTRadixStageKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTRadixStageKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
diff --git a/arm_compute/core/CL/kernels/CLFFTScaleKernel.h b/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
index cd4fe58b9c..d0d2b7613c 100644
--- a/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
+++ b/arm_compute/core/CL/kernels/CLFFTScaleKernel.h
@@ -63,7 +63,7 @@ public:
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] config Kernel configuration
*/
- void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const FFTScaleKernelInfo &config);
/** Static function to check if given info will lead to a valid configuration of @ref CLFFTScaleKernel
*
* @param[in] input Source tensor info. Data types supported: F32.
diff --git a/arm_compute/core/CL/kernels/CLFastCornersKernel.h b/arm_compute/core/CL/kernels/CLFastCornersKernel.h
index 2a6102036f..1a0d4e36a5 100644
--- a/arm_compute/core/CL/kernels/CLFastCornersKernel.h
+++ b/arm_compute/core/CL/kernels/CLFastCornersKernel.h
@@ -75,7 +75,7 @@ public:
* @param[in] non_max_suppression True if non-maxima suppresion is applied, false otherwise.
* @param[in] border_mode Strategy to use for borders.
*/
- void configure(CLCompileContext &compile_context, const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode);
+ void configure(const CLCompileContext &compile_context, const ICLImage *input, ICLImage *output, float threshold, bool non_max_suppression, BorderMode border_mode);
// Inherited methods overridden
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -119,7 +119,7 @@ public:
* @param[out] corners Array of keypoints to store the results.
* @param[out] num_buffers Number of keypoints to store the results.
*/
- void configure(CLCompileContext &compile_context, const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers);
+ void configure(const CLCompileContext &compile_context, const ICLImage *input, bool update_number, ICLKeyPointArray *corners, cl::Buffer *num_buffers);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLFillBorderKernel.h b/arm_compute/core/CL/kernels/CLFillBorderKernel.h
index 226b611bcb..d00ea55a83 100644
--- a/arm_compute/core/CL/kernels/CLFillBorderKernel.h
+++ b/arm_compute/core/CL/kernels/CLFillBorderKernel.h
@@ -65,7 +65,7 @@ public:
* @param[in] border_mode Border mode to use for the convolution.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
*/
- void configure(CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue());
+ void configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue());
/** Function to set the constant value on fill border kernel depending on type.
*
diff --git a/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h b/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h
index b795e03a34..ab009e1aa8 100644
--- a/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLFlattenLayerKernel.h
@@ -60,7 +60,7 @@ public:
* @param[out] output Output tensor with shape [w*h*d, input_batches] where:
* w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLFlattenLayerKernel
*
* @param[in] input First input tensor to flatten with at least 3 dimensions.
diff --git a/arm_compute/core/CL/kernels/CLFloorKernel.h b/arm_compute/core/CL/kernels/CLFloorKernel.h
index a3ccb96c61..4d1ed789db 100644
--- a/arm_compute/core/CL/kernels/CLFloorKernel.h
+++ b/arm_compute/core/CL/kernels/CLFloorKernel.h
@@ -59,7 +59,7 @@ public:
* @param[in] input Source tensor. Data type supported: F16/F32.
* @param[out] output Destination tensor. Same as @p input
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLFloorKernel
*
diff --git a/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h b/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h
index 2d62a576bb..2fe6b223ca 100644
--- a/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h
+++ b/arm_compute/core/CL/kernels/CLFuseBatchNormalizationKernel.h
@@ -81,7 +81,7 @@ public:
* @param[in] epsilon (Optional) Batch normalization layer epsilon parameter. Defaults to 0.001f.
* @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to CONVOLUTION.
*/
- void configure(CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, ICLTensor *fused_weights, ICLTensor *fused_bias,
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, ICLTensor *fused_weights, ICLTensor *fused_bias,
const ICLTensor *input_bias = nullptr, const ICLTensor *bn_beta = nullptr, const ICLTensor *bn_gamma = nullptr,
float epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
/** Static function to check if given info will lead to a valid configuration of @ref CLFuseBatchNormalizationKernel
diff --git a/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h b/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
index 040ca157de..f0f7754960 100644
--- a/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
+++ b/arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
@@ -86,7 +86,7 @@ public:
* @param[in] output_shifts (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
* the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
*/
- virtual void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ virtual void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) = 0;