From 2803f703fe149f8a04c96d484266b7401e1ad355 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Tue, 21 Apr 2020 16:20:03 +0100
Subject: COMPMID-3280: Make all ML primitives for CL use the new interface - Part1 - Fix3 - const fix in the CLKernels part 3

Change-Id: I9cfb896f334145249a97c9287fa00399b8319a8e
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3075
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 arm_compute/core/CL/kernels/CLROIPoolingLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLRangeKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLReductionOperationKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLRemapKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLReorgLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLReshapeLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLReverseKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLScaleKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLScharr3x3Kernel.h | 2 +-
 arm_compute/core/CL/kernels/CLSelectKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLSobel3x3Kernel.h | 2 +-
 arm_compute/core/CL/kernels/CLSobel5x5Kernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLSobel7x7Kernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h | 8 ++++----
 arm_compute/core/CL/kernels/CLSpaceToBatchLayerKernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLSpaceToDepthLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLStackLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLStridedSliceKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLTableLookupKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLThresholdKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLTileKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLTransposeKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLUpsampleLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWarpAffineKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWarpPerspectiveKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWidthConcatenate2TensorsKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWidthConcatenate4TensorsKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLYOLOLayerKernel.h | 2 +-
 src/core/CL/kernels/CLROIPoolingLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLRangeKernel.cpp | 2 +-
 src/core/CL/kernels/CLReductionOperationKernel.cpp | 2 +-
 src/core/CL/kernels/CLRemapKernel.cpp | 2 +-
 src/core/CL/kernels/CLReorgLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLReshapeLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLReverseKernel.cpp | 2 +-
 src/core/CL/kernels/CLScaleKernel.cpp | 2 +-
 src/core/CL/kernels/CLScharr3x3Kernel.cpp | 2 +-
 src/core/CL/kernels/CLSelectKernel.cpp | 2 +-
 src/core/CL/kernels/CLSobel3x3Kernel.cpp | 2 +-
 src/core/CL/kernels/CLSobel5x5Kernel.cpp | 4 ++--
 src/core/CL/kernels/CLSobel7x7Kernel.cpp | 4 ++--
 src/core/CL/kernels/CLSoftmaxLayerKernel.cpp | 4 ++--
 src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp | 4 ++--
 src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLStackLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLStridedSliceKernel.cpp | 2 +-
 src/core/CL/kernels/CLTableLookupKernel.cpp | 2 +-
 src/core/CL/kernels/CLThresholdKernel.cpp | 2 +-
 src/core/CL/kernels/CLTileKernel.cpp | 2 +-
 src/core/CL/kernels/CLTransposeKernel.cpp | 2 +-
 src/core/CL/kernels/CLUpsampleLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLWarpAffineKernel.cpp | 2 +-
 src/core/CL/kernels/CLWarpPerspectiveKernel.cpp | 2 +-
 src/core/CL/kernels/CLWeightsReshapeKernel.cpp | 2 +-
 src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp | 2 +-
 src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp | 2 +-
 src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp | 2 +-
 src/core/CL/kernels/CLWinogradInputTransformKernel.cpp | 2 +-
 src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp | 2 +-
 src/core/CL/kernels/CLYOLOLayerKernel.cpp | 2 +-
 66 files changed, 76 insertions(+), 76 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLROIPoolingLayerKernel.h b/arm_compute/core/CL/kernels/CLROIPoolingLayerKernel.h
index 8ba1b35171..ea70a58188 100644
--- a/arm_compute/core/CL/kernels/CLROIPoolingLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLROIPoolingLayerKernel.h
@@ -77,7 +77,7 @@ public:
     * @note The z dimensions of @p output tensor and @p input tensor must be the same.
     * @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
     */
-    void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);
+    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info);

     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLRangeKernel.h b/arm_compute/core/CL/kernels/CLRangeKernel.h
index 5cc4a220ca..fc8db98bf9 100644
--- a/arm_compute/core/CL/kernels/CLRangeKernel.h
+++ b/arm_compute/core/CL/kernels/CLRangeKernel.h
@@ -67,7 +67,7 @@ public:
     * @param[in] end The ending (not including) value of the sequence.
     * @param[in] step The gap between each pair of values in the sequence.
     */
-    void configure(CLCompileContext &compile_context, ICLTensor *output, float start, float end, float step);
+    void configure(const CLCompileContext &compile_context, ICLTensor *output, float start, float end, float step);
    /** Static function to check if given info will lead to a valid configuration of @ref CLRangeKernel
     *
     * @param[in] output Output tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
diff --git a/arm_compute/core/CL/kernels/CLReductionOperationKernel.h b/arm_compute/core/CL/kernels/CLReductionOperationKernel.h
index bdab58bea1..0b0b4ae9b0 100644
--- a/arm_compute/core/CL/kernels/CLReductionOperationKernel.h
+++ b/arm_compute/core/CL/kernels/CLReductionOperationKernel.h
@@ -69,7 +69,7 @@ public:
     * @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
     * @param[in] width (Optional) In case of x-axis we also need to provide the width of the input image.
     */
-    void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
+    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
    /** Static function to check if given info will lead to a valid configuration of @ref CLReductionOperationKernel.
* diff --git a/arm_compute/core/CL/kernels/CLRemapKernel.h b/arm_compute/core/CL/kernels/CLRemapKernel.h index 14f4b2ddb5..f3d1511905 100644 --- a/arm_compute/core/CL/kernels/CLRemapKernel.h +++ b/arm_compute/core/CL/kernels/CLRemapKernel.h @@ -65,7 +65,7 @@ public: * @param[in] policy The interpolation type. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, bool border_undefined); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLReorgLayerKernel.h b/arm_compute/core/CL/kernels/CLReorgLayerKernel.h index 65304c1cc6..9c064858af 100644 --- a/arm_compute/core/CL/kernels/CLReorgLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLReorgLayerKernel.h @@ -65,7 +65,7 @@ public: * @param[in] stride Stride value to use for reorganizing the values in the output tensor. * It defines the spatial distance between 2 consecutive pixels in the x and y direction */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t stride); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t stride); /** Static function to check if given info will lead to a valid configuration of @ref CLReorgLayerKernel * * @param[in] input Source tensor. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h b/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h index f9588e818f..3ea74114d0 100644 --- a/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h @@ -59,7 +59,7 @@ public: * @param[in] input Source tensor. Data type supported: All. * @param[out] output Destination tensor. Data type supported: Same as @p input */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLReshapeLayerKernel * diff --git a/arm_compute/core/CL/kernels/CLReverseKernel.h b/arm_compute/core/CL/kernels/CLReverseKernel.h index b1547cfb9f..e8f4507969 100644 --- a/arm_compute/core/CL/kernels/CLReverseKernel.h +++ b/arm_compute/core/CL/kernels/CLReverseKernel.h @@ -60,7 +60,7 @@ public: * @param[out] output Output tensor. Data type supported: Same as @p input * @param[in] axis Axis tensor. Contains the indices of the dimensions to reverse. 
Data type supported: U32 */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis); /** Static function to check if given info will lead to a valid configuration of @ref CLReverseKernel * diff --git a/arm_compute/core/CL/kernels/CLScaleKernel.h b/arm_compute/core/CL/kernels/CLScaleKernel.h index 02dfb3eccf..328578d88c 100644 --- a/arm_compute/core/CL/kernels/CLScaleKernel.h +++ b/arm_compute/core/CL/kernels/CLScaleKernel.h @@ -57,7 +57,7 @@ public: * @param[in] sampling_policy (Optional) Sampling policy used by the interpolation. Defaults to @ref SamplingPolicy::CENTER * @param[in] align_corners (Optional) Align corners of input and output, only affecting bilinear policy with TOP_LEFT sampling policy. Defaults to false. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, InterpolationPolicy policy, BorderMode border_mode, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy = SamplingPolicy::CENTER, bool align_corners = false); /** Static function to check if given info will lead to a valid configuration of @ref CLScaleKernel diff --git a/arm_compute/core/CL/kernels/CLScharr3x3Kernel.h b/arm_compute/core/CL/kernels/CLScharr3x3Kernel.h index 1cdb66715e..209a150a67 100644 --- a/arm_compute/core/CL/kernels/CLScharr3x3Kernel.h +++ b/arm_compute/core/CL/kernels/CLScharr3x3Kernel.h @@ -80,7 +80,7 @@ public: * @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S16. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLSelectKernel.h b/arm_compute/core/CL/kernels/CLSelectKernel.h index 02f4cccfdb..5cbd985cda 100644 --- a/arm_compute/core/CL/kernels/CLSelectKernel.h +++ b/arm_compute/core/CL/kernels/CLSelectKernel.h @@ -68,7 +68,7 @@ public: * @param[out] y Second input tensor. Data types supported: Same as @p x * @param[in] output Output tensor. Data types supported: Same as @p x. */ - void configure(CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLSelectKernel * * @param[in] c Condition input tensor. Data types supported: U8. diff --git a/arm_compute/core/CL/kernels/CLSobel3x3Kernel.h b/arm_compute/core/CL/kernels/CLSobel3x3Kernel.h index 3970c07b5a..4240fe80b3 100644 --- a/arm_compute/core/CL/kernels/CLSobel3x3Kernel.h +++ b/arm_compute/core/CL/kernels/CLSobel3x3Kernel.h @@ -66,7 +66,7 @@ public: * @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S16. 
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLSobel5x5Kernel.h b/arm_compute/core/CL/kernels/CLSobel5x5Kernel.h index 0aff209931..ef30f0ec93 100644 --- a/arm_compute/core/CL/kernels/CLSobel5x5Kernel.h +++ b/arm_compute/core/CL/kernels/CLSobel5x5Kernel.h @@ -67,7 +67,7 @@ public: * @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S16. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -121,7 +121,7 @@ public: * @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S16. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLSobel7x7Kernel.h b/arm_compute/core/CL/kernels/CLSobel7x7Kernel.h index 31809b1cf4..4eda5a40d4 100644 --- a/arm_compute/core/CL/kernels/CLSobel7x7Kernel.h +++ b/arm_compute/core/CL/kernels/CLSobel7x7Kernel.h @@ -67,7 +67,7 @@ public: * @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S32. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -121,7 +121,7 @@ public: * @param[out] output_y (Optional) Destination tensor for the Y gradient, Data types supported: S32. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h index 800d909a1c..b174f493b5 100644 --- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h @@ -49,7 +49,7 @@ public: * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32 * @param[out] output Destination tensor. Data types supported: same as @p input */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxKernel * * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32 @@ -92,7 +92,7 @@ public: * @param[out] sum Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input * @param[in] beta (Optional) A scaling factor for the exponent. Defaults to 1.0 */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f); /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DShiftExpSumKernel * * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32 @@ -150,7 +150,7 @@ public: * @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p input * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info); /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxShiftExpSumKernel * * @param[in] input Source tensor. Data types supported: F16/F32 @@ -217,7 +217,7 @@ public: * @param[out] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info); /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DNormKernel * * @param[in] input Source tensor. 
Data types supported: S32/F16/F32 diff --git a/arm_compute/core/CL/kernels/CLSpaceToBatchLayerKernel.h b/arm_compute/core/CL/kernels/CLSpaceToBatchLayerKernel.h index 34f0b669c4..799b7b16c3 100644 --- a/arm_compute/core/CL/kernels/CLSpaceToBatchLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLSpaceToBatchLayerKernel.h @@ -63,7 +63,7 @@ public: * @param[in] paddings 2-D tensor with shape [2, M]. Data types supported: S32 * @param[out] output Tensor output. Data types supported: same as @p input */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output); /** Initialise the kernel's input and output. (Static block shape and paddings) * * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All. @@ -84,7 +84,7 @@ public: * @param[in] padding_right The right padding of the output tensor. * @param[out] output Tensor output. Data types supported: same as @p input */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLSpaceToBatchLayerKernel * * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLSpaceToDepthLayerKernel.h b/arm_compute/core/CL/kernels/CLSpaceToDepthLayerKernel.h index 3f20f665dd..f2371e7d87 100644 --- a/arm_compute/core/CL/kernels/CLSpaceToDepthLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLSpaceToDepthLayerKernel.h @@ -61,7 +61,7 @@ public: * @param[out] output Tensor output. Data types supported: same as @p input * @param[in] block_shape Block shape value. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape); /** Static function to check if given info will lead to a valid configuration of @ref CLSpaceToDepthLayerKernel. * * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLStackLayerKernel.h b/arm_compute/core/CL/kernels/CLStackLayerKernel.h index 19925c251d..e11c0a30d6 100644 --- a/arm_compute/core/CL/kernels/CLStackLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLStackLayerKernel.h @@ -74,7 +74,7 @@ public: * @param[out] output Output tensor. Data types supported: Same as @p input. 
* */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLStackLayerKernel * * @note Supported input tensor rank: up to 4 diff --git a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h index 2e668821bd..ebe1b38878 100644 --- a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h +++ b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h @@ -82,7 +82,7 @@ public: * @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1. * A slice of size 1 starting from starts[i] in the dimension must be preserved. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Coordinates &starts, const Coordinates &ends, const BiStrides &strides, int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask); diff --git a/arm_compute/core/CL/kernels/CLTableLookupKernel.h b/arm_compute/core/CL/kernels/CLTableLookupKernel.h index 9bbaf26d7a..24e333f164 100644 --- a/arm_compute/core/CL/kernels/CLTableLookupKernel.h +++ b/arm_compute/core/CL/kernels/CLTableLookupKernel.h @@ -49,7 +49,7 @@ public: * @param[in] lut The input LUT. Data types supported: U8, S16. * @param[out] output The output tensor. Data types supported: U8, S16. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLLut *lut, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLLut *lut, ICLTensor *output); }; } // namespace arm_compute #endif /* ARM_COMPUTE_CLTABLELOOKUPKERNEL_H */ diff --git a/arm_compute/core/CL/kernels/CLThresholdKernel.h b/arm_compute/core/CL/kernels/CLThresholdKernel.h index 79e9f01aa2..3db48706a3 100644 --- a/arm_compute/core/CL/kernels/CLThresholdKernel.h +++ b/arm_compute/core/CL/kernels/CLThresholdKernel.h @@ -62,7 +62,7 @@ public: * @param[in] type Thresholding type. Either RANGE or BINARY. * @param[in] upper Upper threshold. Only used when the thresholding type is RANGE. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, uint8_t threshold, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, uint8_t threshold, uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper); }; } // namespace arm_compute diff --git a/arm_compute/core/CL/kernels/CLTileKernel.h b/arm_compute/core/CL/kernels/CLTileKernel.h index 1c9186c4dd..68f3c929a6 100644 --- a/arm_compute/core/CL/kernels/CLTileKernel.h +++ b/arm_compute/core/CL/kernels/CLTileKernel.h @@ -64,7 +64,7 @@ public: * @param[out] output Destination tensor. 
Same as @p input * */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples); /** Static function to check if given info will lead to a valid configuration of @ref CLTileKernel * * @param[in] input Source tensor info. Data type supported: All. diff --git a/arm_compute/core/CL/kernels/CLTransposeKernel.h b/arm_compute/core/CL/kernels/CLTransposeKernel.h index 37bd716f3d..09c9e3babf 100644 --- a/arm_compute/core/CL/kernels/CLTransposeKernel.h +++ b/arm_compute/core/CL/kernels/CLTransposeKernel.h @@ -50,7 +50,7 @@ public: * @param[in] input Input tensor. Data types supported: All. * @param[out] output Output tensor. Data type supported: Same as @p input */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLTransposeKernel * * @param[in] input Input tensor. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLUpsampleLayerKernel.h b/arm_compute/core/CL/kernels/CLUpsampleLayerKernel.h index 556e5484d7..e6b4209501 100644 --- a/arm_compute/core/CL/kernels/CLUpsampleLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLUpsampleLayerKernel.h @@ -63,7 +63,7 @@ public: * @param[in] info Contains stride information described in @ref Size2D. * @param[in] upsampling_policy Defines the policy to fill the intermediate pixels. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &info, const InterpolationPolicy upsampling_policy); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &info, const InterpolationPolicy upsampling_policy); /** Static function to check if given info will lead to a valid configuration of @ref CLUpsampleLayerKernel * * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. diff --git a/arm_compute/core/CL/kernels/CLWarpAffineKernel.h b/arm_compute/core/CL/kernels/CLWarpAffineKernel.h index bd26705ea4..a21325e1c4 100644 --- a/arm_compute/core/CL/kernels/CLWarpAffineKernel.h +++ b/arm_compute/core/CL/kernels/CLWarpAffineKernel.h @@ -53,7 +53,7 @@ public: * The matrix argument requires 9 values, the last 3 values are ignored. * @param[in] policy The interpolation type. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy); // Inherited methods overridden: BorderSize border_size() const override; diff --git a/arm_compute/core/CL/kernels/CLWarpPerspectiveKernel.h b/arm_compute/core/CL/kernels/CLWarpPerspectiveKernel.h index 4f4ff34f1d..bb1a018a2b 100644 --- a/arm_compute/core/CL/kernels/CLWarpPerspectiveKernel.h +++ b/arm_compute/core/CL/kernels/CLWarpPerspectiveKernel.h @@ -50,7 +50,7 @@ public: * @param[in] matrix The perspective matrix. Must be 3x3 of type float. * @param[in] policy The interpolation type. 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy); // Inherited methods overridden: BorderSize border_size() const override; diff --git a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h index f09eea958c..47e987b09b 100644 --- a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h +++ b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h @@ -92,7 +92,7 @@ public: * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout * Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1); /** Static function to check if given info will lead to a valid configuration of @ref CLWeightsReshapeKernel * * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared, diff --git a/arm_compute/core/CL/kernels/CLWidthConcatenate2TensorsKernel.h b/arm_compute/core/CL/kernels/CLWidthConcatenate2TensorsKernel.h index 50abf65983..a39ccc2869 100644 --- a/arm_compute/core/CL/kernels/CLWidthConcatenate2TensorsKernel.h +++ b/arm_compute/core/CL/kernels/CLWidthConcatenate2TensorsKernel.h @@ -64,7 +64,7 @@ public: * @param[in] input2 Second input tensor. Data types supported: same as @p input1 * @param[out] output Output tensor. Data types supported: Same as @p input1. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLWidthConcatenate2TensorsKernel * * @param[in] input1 First tensor info. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLWidthConcatenate4TensorsKernel.h b/arm_compute/core/CL/kernels/CLWidthConcatenate4TensorsKernel.h index f203602a12..0e0eae6e85 100644 --- a/arm_compute/core/CL/kernels/CLWidthConcatenate4TensorsKernel.h +++ b/arm_compute/core/CL/kernels/CLWidthConcatenate4TensorsKernel.h @@ -68,7 +68,7 @@ public: * @param[in] input4 Fourth input tensor. Data types supported: same as @p input1 * @param[out] output Output tensor. Data types supported: Same as @p input1. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, const ICLTensor *input3, const ICLTensor *input4, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, const ICLTensor *input3, const ICLTensor *input4, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLWidthConcatenate4TensorsKernel * * @param[in] input1 First tensor info. Data types supported: All. 
diff --git a/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h index 4564d774e3..ef5851fa9a 100644 --- a/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h @@ -66,7 +66,7 @@ public: * @param[in,out] output Output tensor. Data types supported: Same as @p input. * */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int width_offset, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int width_offset, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLWidthConcatenateLayerKernel * * @param[in] input Input tensor info. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h b/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h index bc7573dc9e..5b2dc8cfc9 100644 --- a/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h +++ b/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h @@ -82,7 +82,7 @@ public: * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info); /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradFilterTransformKernel * * @note Winograd filter transform supports the following configurations for NCWH data layout diff --git a/arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h b/arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h index 6bb8d6e616..a305126f2d 100644 --- a/arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h +++ b/arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h @@ -80,7 +80,7 @@ public: * @param[in] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo. 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info); /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradInputTransformKernel * * @note Winograd input transform supports the following configurations for NCWH data layout diff --git a/arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h b/arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h index aab244bb90..512b352637 100644 --- a/arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h +++ b/arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h @@ -86,7 +86,7 @@ public: * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradOutputTransformKernel diff --git a/arm_compute/core/CL/kernels/CLYOLOLayerKernel.h b/arm_compute/core/CL/kernels/CLYOLOLayerKernel.h index c03fc94f91..d0c4a9e417 100644 --- a/arm_compute/core/CL/kernels/CLYOLOLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLYOLOLayerKernel.h @@ -73,7 +73,7 @@ public: * @param[in] act_info Activation layer information. * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels) */ - void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes); + void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes); /** Static function to check if given info will lead to a valid configuration of @ref CLYOLOLayerKernel * * @param[in] input Source tensor info. 
In case of @p output tensor info = nullptr, this tensor will store the result diff --git a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp index 5f64215485..a5b80eb5ef 100644 --- a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp +++ b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp @@ -76,7 +76,7 @@ void CLROIPoolingLayerKernel::configure(const ICLTensor *input, const ICLTensor configure(CLKernelLibrary::get().get_compile_context(), input, rois, output, pool_info); } -void CLROIPoolingLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info) +void CLROIPoolingLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, rois, output); diff --git a/src/core/CL/kernels/CLRangeKernel.cpp b/src/core/CL/kernels/CLRangeKernel.cpp index 97b5f01df4..1e97649e0a 100644 --- a/src/core/CL/kernels/CLRangeKernel.cpp +++ b/src/core/CL/kernels/CLRangeKernel.cpp @@ -96,7 +96,7 @@ void CLRangeKernel::configure(ICLTensor *output, const float start, const float configure(CLKernelLibrary::get().get_compile_context(), output, start, end, step); } -void CLRangeKernel::configure(CLCompileContext &compile_context, ICLTensor *output, const float start, const float end, const float step) +void CLRangeKernel::configure(const CLCompileContext &compile_context, ICLTensor *output, const float start, const float end, const float step) { ARM_COMPUTE_ERROR_ON_NULLPTR(output); diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp index 5c760168ca..33e71445c4 100644 --- a/src/core/CL/kernels/CLReductionOperationKernel.cpp +++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp @@ -132,7 +132,7 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op, width); } -void CLReductionOperationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width) +void CLReductionOperationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); diff --git a/src/core/CL/kernels/CLRemapKernel.cpp b/src/core/CL/kernels/CLRemapKernel.cpp index fb425b512f..dcc425b1fc 100644 --- a/src/core/CL/kernels/CLRemapKernel.cpp +++ b/src/core/CL/kernels/CLRemapKernel.cpp @@ -52,7 +52,7 @@ void CLRemapKernel::configure(const ICLTensor *input, const ICLTensor *map_x, co configure(CLKernelLibrary::get().get_compile_context(), input, map_x, map_y, output, policy, border_undefined); } -void CLRemapKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, +void CLRemapKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *map_x, const ICLTensor *map_y, ICLTensor *output, InterpolationPolicy policy, bool border_undefined) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); diff --git a/src/core/CL/kernels/CLReorgLayerKernel.cpp b/src/core/CL/kernels/CLReorgLayerKernel.cpp index 
e36bcbbe34..065e25ea41 100644 --- a/src/core/CL/kernels/CLReorgLayerKernel.cpp +++ b/src/core/CL/kernels/CLReorgLayerKernel.cpp @@ -75,7 +75,7 @@ void CLReorgLayerKernel::configure(const ICLTensor *input, ICLTensor *output, in configure(CLKernelLibrary::get().get_compile_context(), input, output, stride); } -void CLReorgLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t stride) +void CLReorgLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t stride) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), stride)); diff --git a/src/core/CL/kernels/CLReshapeLayerKernel.cpp b/src/core/CL/kernels/CLReshapeLayerKernel.cpp index 33a1ceacc4..ce792489c5 100644 --- a/src/core/CL/kernels/CLReshapeLayerKernel.cpp +++ b/src/core/CL/kernels/CLReshapeLayerKernel.cpp @@ -67,7 +67,7 @@ void CLReshapeLayerKernel::configure(const ICLTensor *input, ICLTensor *output) configure(CLKernelLibrary::get().get_compile_context(), input, output); } -void CLReshapeLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) +void CLReshapeLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info())); diff --git a/src/core/CL/kernels/CLReverseKernel.cpp b/src/core/CL/kernels/CLReverseKernel.cpp index d88a78c029..d2a3809359 100644 --- a/src/core/CL/kernels/CLReverseKernel.cpp +++ b/src/core/CL/kernels/CLReverseKernel.cpp @@ -68,7 +68,7 @@ void CLReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const configure(CLKernelLibrary::get().get_compile_context(), input, output, axis); } -void CLReverseKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis) +void CLReverseKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, axis); diff --git a/src/core/CL/kernels/CLScaleKernel.cpp b/src/core/CL/kernels/CLScaleKernel.cpp index 33c7ad71c1..f41664f4e0 100644 --- a/src/core/CL/kernels/CLScaleKernel.cpp +++ b/src/core/CL/kernels/CLScaleKernel.cpp @@ -185,7 +185,7 @@ void CLScaleKernel::configure(const ICLTensor *input, ICLTensor *output, Interpo configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, border_mode, sampling_policy, align_corners); } -void CLScaleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, +void CLScaleKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, bool align_corners) { _align_corners = policy == InterpolationPolicy::BILINEAR diff --git a/src/core/CL/kernels/CLScharr3x3Kernel.cpp b/src/core/CL/kernels/CLScharr3x3Kernel.cpp index 9f5bdb3cd8..cb657446f2 100644 --- a/src/core/CL/kernels/CLScharr3x3Kernel.cpp +++ b/src/core/CL/kernels/CLScharr3x3Kernel.cpp @@ -52,7 +52,7 @@ void CLScharr3x3Kernel::configure(const ICLTensor *input, ICLTensor *output_x, I configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, 
border_undefined); } -void CLScharr3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) +void CLScharr3x3Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr)); diff --git a/src/core/CL/kernels/CLSelectKernel.cpp b/src/core/CL/kernels/CLSelectKernel.cpp index 866ec6bde2..2789764d10 100644 --- a/src/core/CL/kernels/CLSelectKernel.cpp +++ b/src/core/CL/kernels/CLSelectKernel.cpp @@ -107,7 +107,7 @@ void CLSelectKernel::configure(const ICLTensor *c, const ICLTensor *x, const ICL configure(CLKernelLibrary::get().get_compile_context(), c, x, y, output); } -void CLSelectKernel::configure(CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output) +void CLSelectKernel::configure(const CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(c, x, y, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(c->info(), x->info(), y->info(), output->info())); diff --git a/src/core/CL/kernels/CLSobel3x3Kernel.cpp b/src/core/CL/kernels/CLSobel3x3Kernel.cpp index 1c97c13d96..12d04d99fe 100644 --- a/src/core/CL/kernels/CLSobel3x3Kernel.cpp +++ b/src/core/CL/kernels/CLSobel3x3Kernel.cpp @@ -53,7 +53,7 @@ void CLSobel3x3Kernel::configure(const ICLTensor *input, ICLTensor *output_x, IC configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined); } -void CLSobel3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) +void CLSobel3x3Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr)); diff --git a/src/core/CL/kernels/CLSobel5x5Kernel.cpp b/src/core/CL/kernels/CLSobel5x5Kernel.cpp index 597807796e..a60bb0b838 100644 --- a/src/core/CL/kernels/CLSobel5x5Kernel.cpp +++ b/src/core/CL/kernels/CLSobel5x5Kernel.cpp @@ -53,7 +53,7 @@ void CLSobel5x5HorKernel::configure(const ICLTensor *input, ICLTensor *output_x, configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined); } -void CLSobel5x5HorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) +void CLSobel5x5HorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr)); @@ -156,7 +156,7 @@ void CLSobel5x5VertKernel::configure(const ICLTensor *input_x, const ICLTensor * configure(CLKernelLibrary::get().get_compile_context(), input_x, input_y, output_x, output_y, border_undefined); } -void CLSobel5x5VertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) +void 
CLSobel5x5VertKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) { ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr)); diff --git a/src/core/CL/kernels/CLSobel7x7Kernel.cpp b/src/core/CL/kernels/CLSobel7x7Kernel.cpp index 183ebce3ac..a5fbe54678 100644 --- a/src/core/CL/kernels/CLSobel7x7Kernel.cpp +++ b/src/core/CL/kernels/CLSobel7x7Kernel.cpp @@ -53,7 +53,7 @@ void CLSobel7x7HorKernel::configure(const ICLTensor *input, ICLTensor *output_x, configure(CLKernelLibrary::get().get_compile_context(), input, output_x, output_y, border_undefined); } -void CLSobel7x7HorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) +void CLSobel7x7HorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr)); @@ -158,7 +158,7 @@ void CLSobel7x7VertKernel::configure(const ICLTensor *input_x, const ICLTensor * configure(CLKernelLibrary::get().get_compile_context(), input_x, input_y, output_x, output_y, border_undefined); } -void CLSobel7x7VertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) +void CLSobel7x7VertKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input_x, const ICLTensor *input_y, ICLTensor *output_x, ICLTensor *output_y, bool border_undefined) { ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr)); diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp index 112d864827..09deb94a85 100644 --- a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp +++ b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp @@ -223,7 +223,7 @@ void CLLogits1DMaxShiftExpSumKernel::configure(const ICLTensor *input, ICLTensor configure(CLKernelLibrary::get().get_compile_context(), input, max, output, sum, info); } -void CLLogits1DMaxShiftExpSumKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info) +void CLLogits1DMaxShiftExpSumKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, max, sum, output); @@ -351,7 +351,7 @@ void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *su configure(CLKernelLibrary::get().get_compile_context(), input, sum, output, info); } -void CLLogits1DNormKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info) +void CLLogits1DNormKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output); diff --git a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp index 520924e764..cac6e32f2f 100644 --- a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp +++ 
b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
@@ -94,7 +94,7 @@ void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const ICLTenso
     configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, paddings, output);
 }
 
-void CLSpaceToBatchLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output)
+void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, const ICLTensor *paddings, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), block_shape->info(), paddings->info(), output->info()));
@@ -131,7 +131,7 @@ void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const int bloc
     configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, padding_left, padding_right, output);
 }
 
-void CLSpaceToBatchLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left,
+void CLSpaceToBatchLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left,
                                           const Size2D &padding_right, ICLTensor *output)
 {
diff --git a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
index b4bd3b8fbe..3e7c929b58 100644
--- a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp
@@ -71,7 +71,7 @@ void CLSpaceToDepthLayerKernel::configure(const ICLTensor *input, ICLTensor *out
     configure(CLKernelLibrary::get().get_compile_context(), input, output, block_shape);
 }
 
-void CLSpaceToDepthLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
+void CLSpaceToDepthLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLStackLayerKernel.cpp b/src/core/CL/kernels/CLStackLayerKernel.cpp
index bc8645b381..33797d7e18 100644
--- a/src/core/CL/kernels/CLStackLayerKernel.cpp
+++ b/src/core/CL/kernels/CLStackLayerKernel.cpp
@@ -85,7 +85,7 @@ void CLStackLayerKernel::configure(const ICLTensor *input, unsigned int axis, un
     configure(CLKernelLibrary::get().get_compile_context(), input, axis, idx_input, num_tensors, output);
 }
 
-void CLStackLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
+void CLStackLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), axis, idx_input, num_tensors, output->info()));
diff --git a/src/core/CL/kernels/CLStridedSliceKernel.cpp b/src/core/CL/kernels/CLStridedSliceKernel.cpp
index 99c0b0b312..18a5135afa 100644
--- a/src/core/CL/kernels/CLStridedSliceKernel.cpp
+++ b/src/core/CL/kernels/CLStridedSliceKernel.cpp
@@ -105,7 +105,7 @@ void CLStridedSliceKernel::configure(const ICLTensor *input, ICLTensor *output,
     configure(CLKernelLibrary::get().get_compile_context(), input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
 }
 
-void CLStridedSliceKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
+void CLStridedSliceKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output,
                                      const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                                      int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
 {
diff --git a/src/core/CL/kernels/CLTableLookupKernel.cpp b/src/core/CL/kernels/CLTableLookupKernel.cpp
index f6c6ffbae8..07827d5bdd 100644
--- a/src/core/CL/kernels/CLTableLookupKernel.cpp
+++ b/src/core/CL/kernels/CLTableLookupKernel.cpp
@@ -41,7 +41,7 @@ void CLTableLookupKernel::configure(const ICLTensor *input, const ICLLut *lut, I
     configure(CLKernelLibrary::get().get_compile_context(), input, lut, output);
 }
 
-void CLTableLookupKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLLut *lut, ICLTensor *output)
+void CLTableLookupKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLLut *lut, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S16);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16);
diff --git a/src/core/CL/kernels/CLThresholdKernel.cpp b/src/core/CL/kernels/CLThresholdKernel.cpp
index 3a94faca4b..4f984632bc 100644
--- a/src/core/CL/kernels/CLThresholdKernel.cpp
+++ b/src/core/CL/kernels/CLThresholdKernel.cpp
@@ -40,7 +40,7 @@ void CLThresholdKernel::configure(const ICLTensor *input, ICLTensor *output, uin
     configure(CLKernelLibrary::get().get_compile_context(), input, output, threshold, false_value, true_value, type, upper);
 }
 
-void CLThresholdKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, uint8_t threshold,
+void CLThresholdKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, uint8_t threshold,
                                   uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLTileKernel.cpp b/src/core/CL/kernels/CLTileKernel.cpp
index 5db69d32e1..14e30ec5b1 100644
--- a/src/core/CL/kernels/CLTileKernel.cpp
+++ b/src/core/CL/kernels/CLTileKernel.cpp
@@ -72,7 +72,7 @@ void CLTileKernel::configure(const ICLTensor *input, ICLTensor *output, const Mu
     configure(CLKernelLibrary::get().get_compile_context(), input, output, multiples);
 }
 
-void CLTileKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples)
+void CLTileKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLTransposeKernel.cpp b/src/core/CL/kernels/CLTransposeKernel.cpp
index 37f07e65a4..a28b685cb2 100644
--- a/src/core/CL/kernels/CLTransposeKernel.cpp
+++ b/src/core/CL/kernels/CLTransposeKernel.cpp
@@ -111,7 +111,7 @@ void CLTransposeKernel::configure(const ICLTensor *input, ICLTensor *output)
     configure(CLKernelLibrary::get().get_compile_context(), input, output);
 }
 
-void CLTransposeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLTransposeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLUpsampleLayerKernel.cpp b/src/core/CL/kernels/CLUpsampleLayerKernel.cpp
index 8df6d5dec4..dd6f85fe12 100644
--- a/src/core/CL/kernels/CLUpsampleLayerKernel.cpp
+++ b/src/core/CL/kernels/CLUpsampleLayerKernel.cpp
@@ -69,7 +69,7 @@ void CLUpsampleLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
     configure(CLKernelLibrary::get().get_compile_context(), input, output, info, upsampling_policy);
 }
 
-void CLUpsampleLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &info, const InterpolationPolicy upsampling_policy)
+void CLUpsampleLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &info, const InterpolationPolicy upsampling_policy)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_UNUSED(upsampling_policy);
diff --git a/src/core/CL/kernels/CLWarpAffineKernel.cpp b/src/core/CL/kernels/CLWarpAffineKernel.cpp
index 43bd34fcea..c40c614687 100644
--- a/src/core/CL/kernels/CLWarpAffineKernel.cpp
+++ b/src/core/CL/kernels/CLWarpAffineKernel.cpp
@@ -64,7 +64,7 @@ void CLWarpAffineKernel::configure(const ICLTensor *input, ICLTensor *output, co
     configure(CLKernelLibrary::get().get_compile_context(), input, output, matrix, policy);
 }
 
-void CLWarpAffineKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy)
+void CLWarpAffineKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp b/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp
index 3c47567203..bc08549b55 100644
--- a/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp
+++ b/src/core/CL/kernels/CLWarpPerspectiveKernel.cpp
@@ -63,7 +63,7 @@ void CLWarpPerspectiveKernel::configure(const ICLTensor *input, ICLTensor *outpu
     configure(CLKernelLibrary::get().get_compile_context(), input, output, matrix, policy);
 }
 
-void CLWarpPerspectiveKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy)
+void CLWarpPerspectiveKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const std::array &matrix, InterpolationPolicy policy)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
index a0db660414..e1da4f03ae 100644
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
+++ b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
@@ -82,7 +82,7 @@ void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *
     configure(CLKernelLibrary::get().get_compile_context(), input, biases, output, num_groups);
 }
 
-void CLWeightsReshapeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
+void CLWeightsReshapeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp b/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp
index ea549e9f46..aba2af1bb7 100644
--- a/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp
+++ b/src/core/CL/kernels/CLWidthConcatenate2TensorsKernel.cpp
@@ -99,7 +99,7 @@ void CLWidthConcatenate2TensorsKernel::configure(const ICLTensor *input1, const
     configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
 }
 
-void CLWidthConcatenate2TensorsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLWidthConcatenate2TensorsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info()));
diff --git a/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp b/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp
index e1ec9d1344..e5eb8b3f55 100644
--- a/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp
+++ b/src/core/CL/kernels/CLWidthConcatenate4TensorsKernel.cpp
@@ -117,7 +117,7 @@ void CLWidthConcatenate4TensorsKernel::configure(const ICLTensor *input1, const
     configure(CLKernelLibrary::get().get_compile_context(), input1, input2, input3, input4, output);
 }
 
-void CLWidthConcatenate4TensorsKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, const ICLTensor *input3, const ICLTensor *input4,
+void CLWidthConcatenate4TensorsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, const ICLTensor *input3, const ICLTensor *input4,
                                                  ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, input3, input4, output);
diff --git a/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp
index 9ff373b18d..8eba293487 100644
--- a/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLWidthConcatenateLayerKernel.cpp
@@ -95,7 +95,7 @@ void CLWidthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i
     configure(CLKernelLibrary::get().get_compile_context(), input, width_offset, output);
 }
 
-void CLWidthConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int width_offset, ICLTensor *output)
+void CLWidthConcatenateLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int width_offset, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), width_offset, output->info()));
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
index 38649126b7..6ced0a1778 100644
--- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
@@ -104,7 +104,7 @@ void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTenso
     configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
 }
 
-void CLWinogradFilterTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
+void CLWinogradFilterTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
index cf882ae9ac..09154536ef 100644
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
@@ -113,7 +113,7 @@ void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor
     configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
 }
 
-void CLWinogradInputTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
+void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
diff --git a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
index f08b5ac7c8..96383ff11d 100644
--- a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
@@ -140,7 +140,7 @@ void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const IC
     configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, winograd_info, act_info);
 }
 
-void CLWinogradOutputTransformKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info,
+void CLWinogradOutputTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info,
                                                 const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLYOLOLayerKernel.cpp b/src/core/CL/kernels/CLYOLOLayerKernel.cpp
index ee119233a4..3a9f822eae 100644
--- a/src/core/CL/kernels/CLYOLOLayerKernel.cpp
+++ b/src/core/CL/kernels/CLYOLOLayerKernel.cpp
@@ -105,7 +105,7 @@ void CLYOLOLayerKernel::configure(ICLTensor *input, ICLTensor *output, const Act
     configure(CLKernelLibrary::get().get_compile_context(), input, output, act_info, num_classes);
 }
 
-void CLYOLOLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes)
+void CLYOLOLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input);
-- 
cgit v1.2.1
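
Note (not part of the patch): every hunk above makes the same change, the CL kernels' context-aware configure() overloads now take the CLCompileContext by const reference. The sketch below is a minimal, self-contained C++ illustration of that forwarding pattern; CompileContext, KernelLibrary and TileKernel are hypothetical stand-ins, not the real Compute Library classes, and the assumption that the library getter hands out a const reference is part of the mock. It only shows why a const-reference parameter lets both the legacy path and an explicitly supplied (possibly const) context bind to the same overload.

// Illustrative sketch only -- hypothetical stand-in types, not the ACL API.
#include <iostream>
#include <string>

struct CompileContext // stand-in for CLCompileContext
{
    std::string device_name{"cl-device"};
};

class KernelLibrary // stand-in for CLKernelLibrary
{
public:
    static KernelLibrary &get()
    {
        static KernelLibrary lib;
        return lib;
    }
    // Assumption of this mock: the getter returns a const reference, so only
    // a "const CompileContext &" parameter can bind to it directly.
    const CompileContext &get_compile_context() const
    {
        return _ctx;
    }

private:
    CompileContext _ctx{};
};

class TileKernel // stand-in for a kernel such as CLTileKernel
{
public:
    // Legacy overload: forwards to the context-aware overload below, mirroring
    // configure(CLKernelLibrary::get().get_compile_context(), ...) in the patch.
    void configure(int multiples)
    {
        configure(KernelLibrary::get().get_compile_context(), multiples);
    }
    // New-style overload: the context is only read during configuration,
    // so it is taken by const reference.
    void configure(const CompileContext &compile_context, int multiples)
    {
        std::cout << "configured on " << compile_context.device_name
                  << " with multiples = " << multiples << "\n";
    }
};

int main()
{
    TileKernel kernel;
    kernel.configure(2);                                             // default context
    kernel.configure(KernelLibrary::get().get_compile_context(), 4); // explicit context
    return 0;
}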