From 679fc96d8c1f8ce2a880f94a9b592f94bc472241 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Tue, 21 Apr 2020 16:08:53 +0100
Subject: COMPMID-3280: Make all ML primitives for CL use the new interface - Part1 - Fix2

- const fix in the CLKernels part 2

Change-Id: Ia12845e291b4137cbaf76eb8438e381c4fd0368a
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3071
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h | 2 +-
 .../core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h | 2 +-
 .../CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h | 2 +-
 .../CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h | 2 +-
 .../CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h | 2 +-
 .../core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h | 2 +-
 .../CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h | 2 +-
 .../CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h | 2 +-
 .../CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h | 6 +++---
 arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h | 2 +-
 .../core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGatherKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h | 2 +-
 arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLHOGDescriptorKernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLHOGDetectorKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLHarrisCornersKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLHeightConcatenateLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLHistogramKernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLIm2ColKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLIntegralImageKernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLLKTrackerKernel.h | 8 ++++----
 .../core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLMagnitudePhaseKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLMeanStdDevKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLMeanStdDevNormalizationKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLMedian3x3Kernel.h | 2 +-
 arm_compute/core/CL/kernels/CLMemsetKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLMinMaxLocationKernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLNonLinearFilterKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.h | 2 +-
 arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLPadLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLPermuteKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h | 4 ++--
 arm_compute/core/CL/kernels/CLPoolingLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLPriorBoxLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h | 2 +-
 arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h | 2 +-
 src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp | 2 +-
 .../CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp | 2 +-
 .../CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp | 2 +-
 .../CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp | 2 +-
 .../CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp | 2 +-
 .../CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp | 2 +-
 .../CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp | 4 ++--
 src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp | 2 +-
 src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp | 2 +-
 src/core/CL/kernels/CLGatherKernel.cpp | 2 +-
 src/core/CL/kernels/CLGaussian3x3Kernel.cpp | 2 +-
 src/core/CL/kernels/CLGaussian5x5Kernel.cpp | 4 ++--
 src/core/CL/kernels/CLGaussianPyramidKernel.cpp | 4 ++--
 src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLHOGDescriptorKernel.cpp | 4 ++--
 src/core/CL/kernels/CLHOGDetectorKernel.cpp | 2 +-
 src/core/CL/kernels/CLHarrisCornersKernel.cpp | 2 +-
 src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLHistogramKernel.cpp | 4 ++--
 src/core/CL/kernels/CLIm2ColKernel.cpp | 2 +-
 src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLIntegralImageKernel.cpp | 4 ++--
 src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLLKTrackerKernel.cpp | 8 ++++----
 src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp | 2 +-
 src/core/CL/kernels/CLMagnitudePhaseKernel.cpp | 2 +-
 src/core/CL/kernels/CLMeanStdDevKernel.cpp | 2 +-
 src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp | 2 +-
 src/core/CL/kernels/CLMedian3x3Kernel.cpp | 2 +-
 src/core/CL/kernels/CLMemsetKernel.cpp | 2 +-
 src/core/CL/kernels/CLMinMaxLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLMinMaxLocationKernel.cpp | 4 ++--
 src/core/CL/kernels/CLNonLinearFilterKernel.cpp | 2 +-
 src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp | 2 +-
 src/core/CL/kernels/CLNormalizationLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLPadLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLPermuteKernel.cpp | 2 +-
 src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp | 4 ++--
 src/core/CL/kernels/CLPoolingLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLPriorBoxLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp | 2 +-
 src/core/CL/kernels/CLQuantizationLayerKernel.cpp | 2 +-
 src/core/CL/kernels/CLROIAlignLayerKernel.cpp | 2 +-
 108 files changed, 131 insertions(+), 131 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h index d100efdcb7..15fd20842e 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h @@ -72,7 +72,7 @@ public: * rhs_info.k0: same as lhs_info.k0 * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyNativeKernel * diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h index 9e3b198c8c..43526b7c41 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h @@ -83,7 +83,7 @@ public: * * @note lhs_info.k0 must be equal to rhs_info.k0 */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyReshapedKernel * diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h index 7beb5bb1c6..1aba6c0398 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h @@ -100,7 +100,7 @@ public: * @param[in] output_shifts (Optional) Output shifts tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM). * Supported data types: S32.
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info, const ICLTensor *vector_sum_col = nullptr, + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info, const ICLTensor *vector_sum_col = nullptr, const ICLTensor *vector_sum_row = nullptr, const ICLTensor *bias = nullptr, const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel * diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h index f9ec558d85..bc982c6120 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h @@ -84,7 +84,7 @@ public: * @param[in] a_offset Offset to be added to each element of the matrix A. * @param[in] b_offset Offset to be added to each element of the matrix B. */ - void configure(CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset, + void configure(const CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset, int32_t b_offset); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionKernel * diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h index 032539b699..583b388d45 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h @@ -91,7 +91,7 @@ public: * @param[in] output_shifts Output shifts tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM). * Supported data types: S32 */ - void configure(CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output, int32_t k, + void configure(const CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output, int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ICLTensor *output_multipliers, const ICLTensor *output_shifts); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionKernel diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h index dd85d8a97c..1e9fde8376 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h @@ -76,7 +76,7 @@ public: * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED * @param[in] info Output stage info. 
Used to pass the quantized output data type */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel * * @param[in] input Input tensor. Data type supported: S32 diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h index f36076dfa2..766ef9a820 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h @@ -76,7 +76,7 @@ public: * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED * @param[in] output_stage GEMMLowp output stage metadata. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleKernel * * @param[in] input Input tensor. Data type supported: S32 diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h index 36cd7bf693..6f58150037 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h @@ -81,7 +81,7 @@ public: * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QSYMM16. * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to 0. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int min = 0, int max = 0); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int min = 0, int max = 0); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel * * @param[in] input Input tensor info. Data type supported: S32 diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h index fd95e00d5d..0c237be34c 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h @@ -84,7 +84,7 @@ public: * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED. 
Defaults to 0 * Along with @p min, this value can be used to implement "rectified linear unit" activation functions */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min = 0, int max = 0); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel * diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h index 1714a02f76..cb3e12e34d 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h @@ -84,7 +84,7 @@ public: * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, * Along with @p min, this value can be used to implement "rectified linear unit" activation functions */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min = 0, int max = 0); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel * diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h index 4b610fa6d0..857b1c7952 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h @@ -68,7 +68,7 @@ public: * - scalar Scalar value to multiply each reduced column/row by. * - mul_byscalar True if each reduced column/row must be multiplied by a scalar value. */ - virtual void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLowpReductionKernelInfo &info) = 0; + virtual void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLowpReductionKernelInfo &info) = 0; protected: const ICLTensor *_input; @@ -105,7 +105,7 @@ public: * - scalar Scalar value to multiply each reduced column/row by. * - mul_byscalar True if each reduced column/row must be multiplied by a scalar value. */ - void configure(CLCompileContext &compile_context, const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override; + void configure(const CLCompileContext &compile_context, const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override; /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixAReductionKernel * * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED @@ -154,7 +154,7 @@ public: * - scalar Scalar value to multiply each reduced column/row by. 
* - mul_byscalar True if each reduced column/row must be multiplied by a scalar value. */ - void configure(CLCompileContext &compile_context, const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override; + void configure(const CLCompileContext &compile_context, const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override; /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixBReductionKernel * * @param[in] mtx_b Input tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h index 037ec4d116..df2f6f4ad1 100644 --- a/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h @@ -56,7 +56,7 @@ public: * @param[in, out] accum The accumulate tensor to convert. Data types supported: F16/F32 * @param[in] biases The shared biases tensor to append. It must be 1D tensor. Data types supported: Same as @p input */ - void configure(CLCompileContext &compile_context, ICLTensor *accum, const ICLTensor *biases); + void configure(const CLCompileContext &compile_context, ICLTensor *accum, const ICLTensor *biases); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixAccumulateBiasesKernel * * @param[in] accum The accumulate tensor to convert. Data types supported: F16/F32 diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h index fe34735fe4..6085b34bcb 100644 --- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h @@ -83,7 +83,7 @@ public: * @param[in] activation_info (Optional) Activation to apply after the matrix multiplication * */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta = 0.f, + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta = 0.f, bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo(), bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyKernel * diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h index 370ef8b3c8..c711a3d1f9 100644 --- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h @@ -82,7 +82,7 @@ public: * rhs_info.k0: same of lhs_info.k0 * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, const 
GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info); diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h index 45df67673c..ee8e57fa8c 100644 --- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h @@ -101,7 +101,7 @@ public: * * @note lhs_info.k0 must be equal to rhs_info.k0 */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info); diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h index b6285dd4db..f7d314a039 100644 --- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h @@ -85,7 +85,7 @@ public: * rhs_info.transpose: true,false * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info); diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h index f31c5c2280..6d70b4b0c2 100644 --- a/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h @@ -58,7 +58,7 @@ public: * @param[in] input1 The 2D reshaped weights tensor. Data type supported: Same as @p input. * @param[out] output The output 2D tensor. Data types supported: Same as @p input, S32 for QASYMM8/QASYMM8_SIGNED. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixVectorMultiplyKernel * * @param[in] input0 The reshaped input tensor info. 
Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 diff --git a/arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h b/arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h index e8e02ac281..fe77fcb428 100644 --- a/arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h @@ -75,7 +75,7 @@ public: * lhs_info.interleave: true, false * @param[in] reinterpret_input_as_3d (Optional) True if the input has to be reinterpreted as 3D tensor */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMReshapeLHSMatrixKernel * * @param[in] input Input tensor info. Data types supported: All diff --git a/arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h b/arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h index ada8889ac0..0e6352bdbb 100644 --- a/arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h @@ -72,7 +72,7 @@ public: * rhs_info.transpose: true, false * rhs_info.interleave: true, false */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMReshapeRHSMatrixKernel * * @param[in] input Input tensor info. Data types supported: All diff --git a/arm_compute/core/CL/kernels/CLGatherKernel.h b/arm_compute/core/CL/kernels/CLGatherKernel.h index c91b95de89..b7539536e9 100644 --- a/arm_compute/core/CL/kernels/CLGatherKernel.h +++ b/arm_compute/core/CL/kernels/CLGatherKernel.h @@ -63,7 +63,7 @@ public: * @param[out] output Destination tensor. Data type supported: Same as @p input * @param[in] axis (Optional) The axis in @p input to gather @p indices from. Negative values wrap around. Defaults to 0 */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis = 0); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis = 0); /** Static function to check if given info will lead to a valid configuration of @ref CLGatherKernel * diff --git a/arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h b/arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h index 7eb7f7ae89..6a9d3eaa4d 100644 --- a/arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h +++ b/arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h @@ -50,7 +50,7 @@ public: * @param[out] output The output tensor. Data types supported: U8. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); // Inherited methods overridden: BorderSize border_size() const override; diff --git a/arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h b/arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h index 37a7727d7a..d8730e0c92 100644 --- a/arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h +++ b/arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h @@ -48,7 +48,7 @@ public: * @param[out] output Destination tensor. Data types supported: S16. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); private: //Make the configure method of the parent class private @@ -73,7 +73,7 @@ public: * @param[out] output Destination tensor. Data types supported: U8. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); private: //Make the configure method of the parent class private diff --git a/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h b/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h index 5acd7fd9b6..34cd062dae 100644 --- a/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h +++ b/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h @@ -59,7 +59,7 @@ public: * @param[in] input Source tensor. Data types supported: U8. * @param[out] output Destination tensor. Output should have half the input width. Data types supported: U16. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -98,7 +98,7 @@ public: * @param[in] input Source tensor. Data types supported: U16. * @param[out] output Destination tensor. Output should have half the input height. Data types supported: U8. 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h b/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h index abac4b74fe..46dc16d6d5 100644 --- a/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h @@ -62,7 +62,7 @@ public: * @param[in] info Contains Compute Anchors operation information described in @ref ComputeAnchorsInfo * */ - void configure(CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info); + void configure(const CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info); /** Static function to check if given info will lead to a valid configuration of @ref CLComputeAllAnchorsKernel * diff --git a/arm_compute/core/CL/kernels/CLHOGDescriptorKernel.h b/arm_compute/core/CL/kernels/CLHOGDescriptorKernel.h index 1b1610e328..046950551d 100644 --- a/arm_compute/core/CL/kernels/CLHOGDescriptorKernel.h +++ b/arm_compute/core/CL/kernels/CLHOGDescriptorKernel.h @@ -65,7 +65,7 @@ public: * @param[out] output Output tensor which stores the local HOG for each cell. DataType supported: F32. Number of channels supported: equal to the number of histogram bins per cell * @param[in] hog_info HOG's metadata */ - void configure(CLCompileContext &compile_context, const ICLTensor *input_magnitude, const ICLTensor *input_phase, ICLTensor *output, const HOGInfo *hog_info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input_magnitude, const ICLTensor *input_phase, ICLTensor *output, const HOGInfo *hog_info); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -108,7 +108,7 @@ public: * @param[out] output Output tensor which stores the normalised blocks. Data type supported: F32. 
Number of channels supported: equal to the number of histogram bins per block * @param[in] hog_info HOG's metadata */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const HOGInfo *hog_info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const HOGInfo *hog_info); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLHOGDetectorKernel.h b/arm_compute/core/CL/kernels/CLHOGDetectorKernel.h index 8a326429a2..681c212cc5 100644 --- a/arm_compute/core/CL/kernels/CLHOGDetectorKernel.h +++ b/arm_compute/core/CL/kernels/CLHOGDetectorKernel.h @@ -80,7 +80,7 @@ public: * @param[in] threshold (Optional) Threshold for the distance between features and SVM classifying plane * @param[in] idx_class (Optional) Index of the class used for evaluating which class the detection window belongs to */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLHOG *hog, ICLDetectionWindowArray *detection_windows, cl::Buffer *num_detection_windows, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLHOG *hog, ICLDetectionWindowArray *detection_windows, cl::Buffer *num_detection_windows, const Size2D &detection_window_stride, float threshold = 0.0f, uint16_t idx_class = 0); diff --git a/arm_compute/core/CL/kernels/CLHarrisCornersKernel.h b/arm_compute/core/CL/kernels/CLHarrisCornersKernel.h index ed91aafb5e..a13119b82c 100644 --- a/arm_compute/core/CL/kernels/CLHarrisCornersKernel.h +++ b/arm_compute/core/CL/kernels/CLHarrisCornersKernel.h @@ -79,7 +79,7 @@ public: * @param[in] sensitivity Sensitivity threshold k from the Harris-Stephens equation. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLImage *input1, const ICLImage *input2, ICLImage *output, + void configure(const CLCompileContext &compile_context, const ICLImage *input1, const ICLImage *input2, ICLImage *output, int32_t block_size, float norm_factor, float strength_thresh, float sensitivity, bool border_undefined); diff --git a/arm_compute/core/CL/kernels/CLHeightConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLHeightConcatenateLayerKernel.h index b9589593fa..524e5ea997 100644 --- a/arm_compute/core/CL/kernels/CLHeightConcatenateLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLHeightConcatenateLayerKernel.h @@ -66,7 +66,7 @@ public: * @param[out] output Output tensor. Data types supported: Same as @p input. * */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int height_offset, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int height_offset, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLHeightConcatenateLayerKernel * * @param[in] input Input tensor info. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLHistogramKernel.h b/arm_compute/core/CL/kernels/CLHistogramKernel.h index bb0d0b3c3c..9cd374711b 100644 --- a/arm_compute/core/CL/kernels/CLHistogramKernel.h +++ b/arm_compute/core/CL/kernels/CLHistogramKernel.h @@ -60,7 +60,7 @@ public: * @param[in] input Source image. Data types supported: U8. * @param[out] output Destination distribution. 
*/ - void configure(CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output); + void configure(const CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -98,7 +98,7 @@ public: * @param[in] input Source image. Data types supported: U8. * @param[out] output Destination distribution. */ - void configure(CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output); + void configure(const CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLIm2ColKernel.h b/arm_compute/core/CL/kernels/CLIm2ColKernel.h index dddbf8d9dd..1ccac5b7e9 100644 --- a/arm_compute/core/CL/kernels/CLIm2ColKernel.h +++ b/arm_compute/core/CL/kernels/CLIm2ColKernel.h @@ -93,7 +93,7 @@ public: * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation = Size2D(1U, 1U), unsigned int num_groups = 1); /** Static function to check if given info will lead to a valid configuration of @ref CLIm2ColKernel diff --git a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h index 93490d8e12..014dce1759 100644 --- a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h @@ -66,7 +66,7 @@ public: * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input. * @param[in] info Kernel meta-data descriptor */ - void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info); + void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info); /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer. * diff --git a/arm_compute/core/CL/kernels/CLIntegralImageKernel.h b/arm_compute/core/CL/kernels/CLIntegralImageKernel.h index 8e06887dba..6b6076a917 100644 --- a/arm_compute/core/CL/kernels/CLIntegralImageKernel.h +++ b/arm_compute/core/CL/kernels/CLIntegralImageKernel.h @@ -47,7 +47,7 @@ public: * @param[in] input An input tensor. Data types supported: U8 * @param[out] output Destination tensor, Data types supported: U32. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); }; /** Interface to run the vertical pass of the integral image kernel. */ @@ -74,7 +74,7 @@ public: * @param[in] compile_context The compile context to be used. * @param[in,out] in_out The input/output tensor. 
Data types supported: U32 */ - void configure(CLCompileContext &compile_context, ICLTensor *in_out); + void configure(const CLCompileContext &compile_context, ICLTensor *in_out); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h b/arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h index e4b7af7984..169910b70d 100644 --- a/arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h @@ -70,7 +70,7 @@ public: * @param[in] axis Axis along which to reduce. Negative values wrap around. Maximum supported actual reduction axis : 2 * @param[in] epsilon Lower bound value for the normalization. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon); /** Static function to check if given info will lead to a valid configuration of @ref CLL2NormalizeLayerKernel. * diff --git a/arm_compute/core/CL/kernels/CLLKTrackerKernel.h b/arm_compute/core/CL/kernels/CLLKTrackerKernel.h index 3e938c9658..f94602c381 100644 --- a/arm_compute/core/CL/kernels/CLLKTrackerKernel.h +++ b/arm_compute/core/CL/kernels/CLLKTrackerKernel.h @@ -99,7 +99,7 @@ public: * @param[in] num_levels The number of pyramid levels * @param[in] pyramid_scale Scale factor used for generating the pyramid */ - void configure(CLCompileContext &compile_context, const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates, + void configure(const CLCompileContext &compile_context, const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates, ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal, bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale); @@ -123,7 +123,7 @@ public: * @param[in] new_points_internal Pointer to the array of internal @ref CLLKInternalKeypoint new points * @param[out] new_points Pointer to the @ref ICLKeyPointArray storing new key points */ - void configure(CLCompileContext &compile_context, ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points); + void configure(const CLCompileContext &compile_context, ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -172,7 +172,7 @@ public: * @param[in] window_dimension The size of the window on which to perform the algorithm * @param[in] level The pyramid level */ - void configure(CLCompileContext &compile_context, const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy, + void configure(const CLCompileContext &compile_context, const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy, ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival, size_t window_dimension, size_t level); @@ -227,7 +227,7 @@ public: * @param[in] window_dimension The size of the window on which to perform the algorithm * @param[in] level The pyramid level */ - void configure(CLCompileContext &compile_context, const ICLTensor *new_input, ICLLKInternalKeypointArray 
*new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival, + void configure(const CLCompileContext &compile_context, const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival, Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level); // Inherited methods overridden: diff --git a/arm_compute/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h index 757e3e4f86..e68160f96d 100644 --- a/arm_compute/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h +++ b/arm_compute/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h @@ -62,7 +62,7 @@ public: * @param[in] input1 Second input tensor. Data type supported: same as @p input0 * @param[out] output Output tensor to store the result. Data type supported: same as @p input0 */ - void configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLLocallyConnectedMatrixMultiplyKernel * * @param[in] input0 First input tensor info. Data types supported: F32 diff --git a/arm_compute/core/CL/kernels/CLMagnitudePhaseKernel.h b/arm_compute/core/CL/kernels/CLMagnitudePhaseKernel.h index 390da4958d..e0de3e7636 100644 --- a/arm_compute/core/CL/kernels/CLMagnitudePhaseKernel.h +++ b/arm_compute/core/CL/kernels/CLMagnitudePhaseKernel.h @@ -72,7 +72,7 @@ public: * @param[in] mag_type (Optional) Magnitude calculation type. Default: L2NORM. * @param[in] phase_type (Optional) Phase calculation type. Default: SIGNED. */ - void configure(CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, + void configure(const CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, MagnitudeType mag_type = MagnitudeType::L2NORM, PhaseType phase_type = PhaseType::SIGNED); // Inherited methods overridden: diff --git a/arm_compute/core/CL/kernels/CLMeanStdDevKernel.h b/arm_compute/core/CL/kernels/CLMeanStdDevKernel.h index ed0213abcc..96b4c4ea60 100644 --- a/arm_compute/core/CL/kernels/CLMeanStdDevKernel.h +++ b/arm_compute/core/CL/kernels/CLMeanStdDevKernel.h @@ -68,7 +68,7 @@ public: * @param[out] stddev (Optional) Output standard deviation of pixel values. * @param[out] global_sum_squared (Optional if stddev is not set, required if stddev is set) Keeps global sum of squared pixel values (Buffer size: 1 cl_ulong). */ - void configure(CLCompileContext &compile_context, const ICLImage *input, float *mean, cl::Buffer *global_sum, float *stddev = nullptr, cl::Buffer *global_sum_squared = nullptr); + void configure(const CLCompileContext &compile_context, const ICLImage *input, float *mean, cl::Buffer *global_sum, float *stddev = nullptr, cl::Buffer *global_sum_squared = nullptr); /** Static function to check if given info will lead to a valid configuration of @ref CLMeanStdDevKernel. * * @param[in] input Input image info. Data types supported: U8. 
diff --git a/arm_compute/core/CL/kernels/CLMeanStdDevNormalizationKernel.h b/arm_compute/core/CL/kernels/CLMeanStdDevNormalizationKernel.h index a21a6eed73..ff0c96e168 100644 --- a/arm_compute/core/CL/kernels/CLMeanStdDevNormalizationKernel.h +++ b/arm_compute/core/CL/kernels/CLMeanStdDevNormalizationKernel.h @@ -66,7 +66,7 @@ public: * @param[out] output (Optional) Destination tensor. It can be nullptr in case of in-place computation. Data type supported: same as @p input * @param[in] epsilon (Optional) Small float to avoid division by zero in case of zero standard deviation. Defaults to 1e-8. */ - void configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output = nullptr, float epsilon = 1e-8f); + void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output = nullptr, float epsilon = 1e-8f); /** Static function to check if given info will lead to a valid configuration of @ref CLMeanStdDevNormalizationKernel * * @param[in] input Source tensor info with 2 dimensions. In case of @p output tensor info = nullptr, diff --git a/arm_compute/core/CL/kernels/CLMedian3x3Kernel.h b/arm_compute/core/CL/kernels/CLMedian3x3Kernel.h index df40fcf7e9..c68ab07781 100644 --- a/arm_compute/core/CL/kernels/CLMedian3x3Kernel.h +++ b/arm_compute/core/CL/kernels/CLMedian3x3Kernel.h @@ -50,7 +50,7 @@ public: * @param[out] output The output tensor. Data types supported: U8. * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); // Inherited methods overridden: BorderSize border_size() const override; diff --git a/arm_compute/core/CL/kernels/CLMemsetKernel.h b/arm_compute/core/CL/kernels/CLMemsetKernel.h index a2e61a1782..430bc1d4f2 100644 --- a/arm_compute/core/CL/kernels/CLMemsetKernel.h +++ b/arm_compute/core/CL/kernels/CLMemsetKernel.h @@ -63,7 +63,7 @@ public: * @param[in] constant_value The value used to fill the planes of the tensor * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr. */ - void configure(CLCompileContext &compile_context, ICLTensor *tensor, const PixelValue &constant_value, Window *window = nullptr); + void configure(const CLCompileContext &compile_context, ICLTensor *tensor, const PixelValue &constant_value, Window *window = nullptr); /** Static function to check if given info will lead to a valid configuration of @ref CLMemsetKernel * * @param[in] tensor Source tensor info. Data types supported: All. diff --git a/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h b/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h index 7a31d71553..5f9685f303 100644 --- a/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h @@ -59,7 +59,7 @@ public: * @param[out] output Output tensor with shape [2, batches, ...] which stores the minimum and maximum values for each 3D input tensor. * The dimensions over the second must match the batched dimensions of the input tensor. Data types supported: F32. 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLMinMaxLayerKernel * * @param[in] input Input tensor info. Data types supported: F32. diff --git a/arm_compute/core/CL/kernels/CLMinMaxLocationKernel.h b/arm_compute/core/CL/kernels/CLMinMaxLocationKernel.h index e57f7587fa..afb134fa59 100644 --- a/arm_compute/core/CL/kernels/CLMinMaxLocationKernel.h +++ b/arm_compute/core/CL/kernels/CLMinMaxLocationKernel.h @@ -61,7 +61,7 @@ public: * @param[in] input Input Image. Data types supported: U8/S16/F32. * @param[out] min_max Buffer of 2 elements to store the min value at position 0 and the max value at position 1. Data type supported: S32 if input type is U8/S16, F32 if input type is F32. */ - void configure(CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max); + void configure(const CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -110,7 +110,7 @@ public: * @param[out] min_loc (Optional) Array of Coordinates2D used to store minimum value locations. * @param[out] max_loc (Optional) Array of Coordinates2D used to store maximum value locations. */ - void configure(CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max, cl::Buffer *min_max_count, + void configure(const CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max, cl::Buffer *min_max_count, ICLCoordinates2DArray *min_loc = nullptr, ICLCoordinates2DArray *max_loc = nullptr); // Inherited methods overridden: diff --git a/arm_compute/core/CL/kernels/CLNonLinearFilterKernel.h b/arm_compute/core/CL/kernels/CLNonLinearFilterKernel.h index b255f0cb90..1f337356e9 100644 --- a/arm_compute/core/CL/kernels/CLNonLinearFilterKernel.h +++ b/arm_compute/core/CL/kernels/CLNonLinearFilterKernel.h @@ -63,7 +63,7 @@ public: * @param[in] mask The given mask. Will be used only if pattern is specified to PATTERN_OTHER * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NonLinearFilterFunction function, + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NonLinearFilterFunction function, unsigned int mask_size, MatrixPattern pattern, const uint8_t *mask, bool border_undefined); diff --git a/arm_compute/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.h b/arm_compute/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.h index 084c77bf26..a256bc798d 100644 --- a/arm_compute/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.h +++ b/arm_compute/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.h @@ -51,7 +51,7 @@ public: * @param[out] output Destination tensor. Data types supported: U8, F32. (Must be the same as the input tensor) * @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant. 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined); // Inherited methods overridden: BorderSize border_size() const override; diff --git a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h index 350b504d50..2511818ef2 100644 --- a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h @@ -63,7 +63,7 @@ public: * Data layouts supported: same as @p input. * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info); /** Static function to check if given info will lead to a valid configuration of @ref CLNormalizationLayerKernel * * @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM], diff --git a/arm_compute/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h b/arm_compute/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h index addd3942eb..d247e1fddc 100644 --- a/arm_compute/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h @@ -67,7 +67,7 @@ public: * @param[in] std Standard deviation values tensor. 1 dimension with size equal to the number of input channels. * Data types supported: same as @p input */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std); /** Static function to check if given info will lead to a valid configuration of @ref CLNormalizePlanarYUVLayerKernel * * @param[in] input Source tensor info. 3 lower dimensions represent a single input with dimensions [width, height, channels]. diff --git a/arm_compute/core/CL/kernels/CLPadLayerKernel.h b/arm_compute/core/CL/kernels/CLPadLayerKernel.h index 09f72088c4..f051774d84 100644 --- a/arm_compute/core/CL/kernels/CLPadLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLPadLayerKernel.h @@ -69,7 +69,7 @@ public: * @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT, * or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT). 
*/ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value = PixelValue(), + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value = PixelValue(), PaddingMode mode = PaddingMode::CONSTANT); /** Static function to check if given info will lead to a valid configuration of @ref CLPadLayerKernel * diff --git a/arm_compute/core/CL/kernels/CLPermuteKernel.h b/arm_compute/core/CL/kernels/CLPermuteKernel.h index 6414edb113..1a9240ef6b 100644 --- a/arm_compute/core/CL/kernels/CLPermuteKernel.h +++ b/arm_compute/core/CL/kernels/CLPermuteKernel.h @@ -65,7 +65,7 @@ public: * @param[in] output The output tensor. Data types supported: Same as @p input * @param[in] perm Permutation vector */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PermutationVector &perm); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PermutationVector &perm); /** Static function to check if given info will lead to a valid configuration of @ref CLPermuteKernel * * @note Arbitrary permutation vectors are supported with rank not greater than 4 diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h index a9cfcc57de..52a09d9a49 100644 --- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h +++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h @@ -79,7 +79,7 @@ public: * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale, + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel * @@ -145,7 +145,7 @@ public: * @param[out] output The output tensor, Data types supported: same as @p input1. Number of channels supported: same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplicationKernel * * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2. 
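
[Editor's note: illustrative sketch, not part of the patch.] The hunks above only add const to the compile-context parameter; existing call sites are unaffected. Below is a minimal caller-side sketch of the updated CLPixelWiseMultiplicationKernel overload, assuming an initialised CL runtime; the tensor shapes, scale and policies are arbitrary example values, not taken from the patch.

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    // Set up context, command queue and kernel library for the default OpenCL device.
    CLScheduler::get().default_init();

    // Illustrative 2D F32 tensors; the shapes are arbitrary for this sketch.
    CLTensor input1, input2, output;
    input1.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    input2.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // New interface: the compile context is passed explicitly and, after this patch,
    // taken by const reference, so a caller-owned context is never mutated.
    CLPixelWiseMultiplicationKernel mul_kernel;
    mul_kernel.configure(CLKernelLibrary::get().get_compile_context(),
                         &input1, &input2, &output,
                         1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);

    input1.allocator()->allocate();
    input2.allocator()->allocate();
    output.allocator()->allocate();

    // Fill input1/input2 here, then run the kernel on the default queue.
    CLScheduler::get().enqueue(mul_kernel);
    CLScheduler::get().sync();
    return 0;
}
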
diff --git a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h index 4ab6955110..395750440c 100644 --- a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h @@ -67,7 +67,7 @@ public: * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo. * @param[out] indices (optional) The indices of the maximal values. Data type supported: U32. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices = nullptr); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices = nullptr); /** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayerKernel * * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. diff --git a/arm_compute/core/CL/kernels/CLPriorBoxLayerKernel.h b/arm_compute/core/CL/kernels/CLPriorBoxLayerKernel.h index 89fd656581..5fd27d9233 100644 --- a/arm_compute/core/CL/kernels/CLPriorBoxLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLPriorBoxLayerKernel.h @@ -69,7 +69,7 @@ public: * @param[in] max Maximum prior box values * @param[in] aspect_ratios Aspect ratio values */ - void configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min, cl::Buffer *max, + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min, cl::Buffer *max, cl::Buffer *aspect_ratios); /** Static function to check if given info will lead to a valid configuration of @ref CLPriorBoxLayerKernel * diff --git a/arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h b/arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h index 3206eda729..1a2f3111f5 100644 --- a/arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h +++ b/arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h @@ -63,7 +63,7 @@ public: * @param[in] weight Weight tensor. Data types supported: Same as @p input. * @param[in] bias Bias tensor. Data types supported: S32. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *weight, const ICLTensor *bias); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *weight, const ICLTensor *bias); /** Static function to check if given info will lead to a valid configuration of @ref CLQLSTMLayerNormalizationKernel * * @param[in] input Source tensor info with 2 dimensions. Data types supported: QSYMM16. 
diff --git a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h index a651529f2b..de30447e17 100644 --- a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h @@ -65,7 +65,7 @@ public: * * @note Output auto initialization is not supported by this kernel */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayerKernel * * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. diff --git a/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h b/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h index 8f4485a03b..30bdbb1844 100644 --- a/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLROIAlignLayerKernel.h @@ -79,7 +79,7 @@ public: * @note The z dimensions of @p output tensor and @p input tensor must be the same. * @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array. */ - void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info); + void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info); /** Static function to check if given info will lead to a valid configuration of @ref CLROIAlignLayerKernel * * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 
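
[Editor's note: illustrative sketch, not part of the patch.] This closes out the header changes; the remaining hunks update the matching definitions under src/core/CL/kernels, where each legacy configure() overload keeps forwarding to the new const compile-context overload via CLKernelLibrary::get().get_compile_context(). The CLQuantizationLayerKernel header above notes that output auto initialization is not supported, so the caller sets up the output info, including its quantization. A minimal sketch under that assumption (shapes and quantization parameters are arbitrary example values):

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h"
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // F32 source, QASYMM8 destination; the destination quantization info is an
    // example value and must be provided by the caller (no output auto init).
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10)));

    CLQuantizationLayerKernel quant_kernel;
    // Legacy overload (still available): quant_kernel.configure(&src, &dst);
    // New overload: the compile context is passed explicitly, now by const reference.
    quant_kernel.configure(CLKernelLibrary::get().get_compile_context(), &src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // Fill src here, then run on the default queue.
    CLScheduler::get().enqueue(quant_kernel);
    CLScheduler::get().sync();
    return 0;
}
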
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp index bcf71565af..760a8622ab 100644 --- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp @@ -176,7 +176,7 @@ void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, co configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, lhs_info, rhs_info, gemm_info); } -void CLGEMMLowpMatrixMultiplyNativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, +void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info) { diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp index ebb00a45d5..eeedfdaab1 100644 --- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp @@ -171,7 +171,7 @@ void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, lhs_info, rhs_info, gemm_info); } -void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, +void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info) { diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp index ad675df7ea..0fdc899197 100644 --- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp @@ -320,7 +320,7 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output, gemm_info, vector_sum_col, vector_sum_row, bias, output_multipliers, output_shifts); } -void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info, +void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, const ICLTensor *output_multipliers, const ICLTensor *output_shifts) { diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp index fd2cc7a680..dc8eb76c23 100644 --- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp @@ -148,7 +148,7 @@ void CLGEMMLowpOffsetContributionKernel::configure(ICLTensor *mm_result, 
const I configure(CLKernelLibrary::get().get_compile_context(), mm_result, vector_sum_col, vector_sum_row, bias, k, a_offset, b_offset); } -void CLGEMMLowpOffsetContributionKernel::configure(CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, +void CLGEMMLowpOffsetContributionKernel::configure(const CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset, int32_t b_offset) { diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp index d52fb21574..26b318b0fd 100644 --- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp @@ -187,7 +187,7 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *m configure(CLKernelLibrary::get().get_compile_context(), mm_result, vector_sum_col, vector_sum_row, bias, output, k, a_offset, b_offset, output_stage, output_multipliers, output_shifts); } -void CLGEMMLowpOffsetContributionOutputStageKernel::configure(CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, +void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output, int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ICLTensor *output_multipliers, const ICLTensor *output_shifts) diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp index 171dc48112..f9f4839688 100644 --- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp @@ -120,7 +120,7 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const ICLTensor *i configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, info); } -void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, +void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info) { // Perform validate step diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp index ca85e8b655..2db7d6d22b 100644 --- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp @@ -110,7 +110,7 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const ICLTensor *input, c configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, output_stage); } -void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage) +void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &compile_context, 
const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage) { // Perform validate step ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp index 066307c4b2..2306b009bd 100644 --- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp @@ -121,7 +121,7 @@ void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(const configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, min, max); } -void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, +void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int min, int max) { diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp index b6d98e6749..b4a7cc9d90 100644 --- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp @@ -121,7 +121,7 @@ void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure(const I configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max); } -void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, +void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min, int max) { diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp index 7f2f2e75a9..3158d59948 100644 --- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp @@ -121,7 +121,7 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(const configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max); } -void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, +void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min, int max) { diff --git a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp 
b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp index 9fa253a55a..44f8797cff 100644 --- a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp @@ -91,7 +91,7 @@ void CLGEMMLowpMatrixAReductionKernel::configure(const ICLTensor *mtx_a, ICLTens configure(CLKernelLibrary::get().get_compile_context(), mtx_a, vector_sum_row, info); } -void CLGEMMLowpMatrixAReductionKernel::configure(CLCompileContext &compile_context, const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) +void CLGEMMLowpMatrixAReductionKernel::configure(const CLCompileContext &compile_context, const ICLTensor *mtx_a, ICLTensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) { // Perform validate step ARM_COMPUTE_ERROR_ON_NULLPTR(mtx_a, vector_sum_row); @@ -168,7 +168,7 @@ void CLGEMMLowpMatrixBReductionKernel::configure(const ICLTensor *mtx_b, ICLTens configure(CLKernelLibrary::get().get_compile_context(), mtx_b, vector_sum_col, info); } -void CLGEMMLowpMatrixBReductionKernel::configure(CLCompileContext &compile_context, const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) +void CLGEMMLowpMatrixBReductionKernel::configure(const CLCompileContext &compile_context, const ICLTensor *mtx_b, ICLTensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(mtx_b, vector_sum_col); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_matrix_b_reduction(mtx_b->info(), vector_sum_col->info())); diff --git a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp index 045ae282d6..03cd1878aa 100644 --- a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp @@ -82,7 +82,7 @@ void CLGEMMMatrixAccumulateBiasesKernel::configure(ICLTensor *accum, const ICLTe configure(CLKernelLibrary::get().get_compile_context(), accum, biases); } -void CLGEMMMatrixAccumulateBiasesKernel::configure(CLCompileContext &compile_context, ICLTensor *accum, const ICLTensor *biases) +void CLGEMMMatrixAccumulateBiasesKernel::configure(const CLCompileContext &compile_context, ICLTensor *accum, const ICLTensor *biases) { // Perform validate step ARM_COMPUTE_ERROR_ON_NULLPTR(accum, biases); diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp index 9587a042be..d2c79543ad 100644 --- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp @@ -310,7 +310,7 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision, activation_info); } -void CLGEMMMatrixMultiplyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, +void CLGEMMMatrixMultiplyKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output); diff --git 
a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp index af4b097c72..d5a52845a1 100644 --- a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp @@ -217,7 +217,7 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const ICLTensor *input0, const configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info); } -void CLGEMMMatrixMultiplyNativeKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, +void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info) diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp index eb01486087..09e4e98a87 100644 --- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp @@ -219,7 +219,7 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, cons configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info); } -void CLGEMMMatrixMultiplyReshapedKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, +void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info) diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp index 011e93d9b3..13f8152fb4 100644 --- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp @@ -220,7 +220,7 @@ void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info); } -void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, +void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info) diff --git a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp index 98a1dee758..4e57259cd6 100644 --- a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp @@ -86,7 +86,7 @@ void CLGEMMMatrixVectorMultiplyKernel::configure(const ICLTensor *input0, const 
configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output); } -void CLGEMMMatrixVectorMultiplyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output) +void CLGEMMMatrixVectorMultiplyKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info())); diff --git a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp index 73e3106ff8..3267a0e39e 100644 --- a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp +++ b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp @@ -124,7 +124,7 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const ICLTensor *input, ICLTensor * configure(CLKernelLibrary::get().get_compile_context(), input, output, lhs_info, reinterpret_input_as_3d); } -void CLGEMMReshapeLHSMatrixKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d) +void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); diff --git a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp index 1623b1e552..4217932097 100644 --- a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp +++ b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp @@ -105,7 +105,7 @@ void CLGEMMReshapeRHSMatrixKernel::configure(const ICLTensor *input, ICLTensor * configure(CLKernelLibrary::get().get_compile_context(), input, output, rhs_info); } -void CLGEMMReshapeRHSMatrixKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info) +void CLGEMMReshapeRHSMatrixKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); diff --git a/src/core/CL/kernels/CLGatherKernel.cpp b/src/core/CL/kernels/CLGatherKernel.cpp index 6bee66ab93..07b9282879 100644 --- a/src/core/CL/kernels/CLGatherKernel.cpp +++ b/src/core/CL/kernels/CLGatherKernel.cpp @@ -92,7 +92,7 @@ void CLGatherKernel::configure(const ICLTensor *input, const ICLTensor *indices, configure(CLKernelLibrary::get().get_compile_context(), input, indices, output, axis); } -void CLGatherKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis) +void CLGatherKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, indices); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), indices->info(), output->info(), axis)); diff --git a/src/core/CL/kernels/CLGaussian3x3Kernel.cpp b/src/core/CL/kernels/CLGaussian3x3Kernel.cpp index 0edf46b506..210ffb9123 100644 --- a/src/core/CL/kernels/CLGaussian3x3Kernel.cpp +++ b/src/core/CL/kernels/CLGaussian3x3Kernel.cpp @@ -44,7 +44,7 @@ void CLGaussian3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, b configure(CLKernelLibrary::get().get_compile_context(), 
input, output, border_undefined); } -void CLGaussian3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) +void CLGaussian3x3Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8); diff --git a/src/core/CL/kernels/CLGaussian5x5Kernel.cpp b/src/core/CL/kernels/CLGaussian5x5Kernel.cpp index 98436b950f..cb864671db 100644 --- a/src/core/CL/kernels/CLGaussian5x5Kernel.cpp +++ b/src/core/CL/kernels/CLGaussian5x5Kernel.cpp @@ -32,7 +32,7 @@ void CLGaussian5x5HorKernel::configure(const ICLTensor *input, ICLTensor *output configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined); } -void CLGaussian5x5HorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) +void CLGaussian5x5HorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) { const std::array matrix = { 1, 4, 6, 4, 1 }; @@ -45,7 +45,7 @@ void CLGaussian5x5VertKernel::configure(const ICLTensor *input, ICLTensor *outpu configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined); } -void CLGaussian5x5VertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) +void CLGaussian5x5VertKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) { const uint32_t scale = 256; const std::array matrix = { 1, 4, 6, 4, 1 }; diff --git a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp index 8486d45e1a..73dbda22f3 100644 --- a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp +++ b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp @@ -47,7 +47,7 @@ void CLGaussianPyramidHorKernel::configure(const ICLTensor *input, ICLTensor *ou configure(CLKernelLibrary::get().get_compile_context(), input, output); } -void CLGaussianPyramidHorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) +void CLGaussianPyramidHorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16); @@ -158,7 +158,7 @@ void CLGaussianPyramidVertKernel::configure(const ICLTensor *input, ICLTensor *o configure(CLKernelLibrary::get().get_compile_context(), input, output); } -void CLGaussianPyramidVertKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) +void CLGaussianPyramidVertKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8); diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp index 0f09152757..8baac18bf6 100644 --- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp +++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp @@ -76,7 +76,7 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, 
ICLTensor *a configure(CLKernelLibrary::get().get_compile_context(), anchors, all_anchors, info); } -void CLComputeAllAnchorsKernel::configure(CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info) +void CLComputeAllAnchorsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info)); diff --git a/src/core/CL/kernels/CLHOGDescriptorKernel.cpp b/src/core/CL/kernels/CLHOGDescriptorKernel.cpp index f79388e93d..e58b62e9de 100644 --- a/src/core/CL/kernels/CLHOGDescriptorKernel.cpp +++ b/src/core/CL/kernels/CLHOGDescriptorKernel.cpp @@ -51,7 +51,7 @@ void CLHOGOrientationBinningKernel::configure(const ICLTensor *input_magnitude, configure(CLKernelLibrary::get().get_compile_context(), input_magnitude, input_phase, output, hog_info); } -void CLHOGOrientationBinningKernel::configure(CLCompileContext &compile_context, const ICLTensor *input_magnitude, const ICLTensor *input_phase, ICLTensor *output, const HOGInfo *hog_info) +void CLHOGOrientationBinningKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input_magnitude, const ICLTensor *input_phase, ICLTensor *output, const HOGInfo *hog_info) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_magnitude, 1, DataType::S16); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_phase, 1, DataType::U8); @@ -147,7 +147,7 @@ void CLHOGBlockNormalizationKernel::configure(const ICLTensor *input, ICLTensor configure(CLKernelLibrary::get().get_compile_context(), input, output, hog_info); } -void CLHOGBlockNormalizationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const HOGInfo *hog_info) +void CLHOGBlockNormalizationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const HOGInfo *hog_info) { ARM_COMPUTE_ERROR_ON(hog_info == nullptr); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, hog_info->num_bins(), DataType::F32); diff --git a/src/core/CL/kernels/CLHOGDetectorKernel.cpp b/src/core/CL/kernels/CLHOGDetectorKernel.cpp index 02fad20a05..bee9744aad 100644 --- a/src/core/CL/kernels/CLHOGDetectorKernel.cpp +++ b/src/core/CL/kernels/CLHOGDetectorKernel.cpp @@ -48,7 +48,7 @@ void CLHOGDetectorKernel::configure(const ICLTensor *input, const ICLHOG *hog, I configure(CLKernelLibrary::get().get_compile_context(), input, hog, detection_windows, num_detection_windows, detection_window_stride, threshold, idx_class); } -void CLHOGDetectorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLHOG *hog, ICLDetectionWindowArray *detection_windows, cl::Buffer *num_detection_windows, +void CLHOGDetectorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLHOG *hog, ICLDetectionWindowArray *detection_windows, cl::Buffer *num_detection_windows, const Size2D &detection_window_stride, float threshold, uint16_t idx_class) { diff --git a/src/core/CL/kernels/CLHarrisCornersKernel.cpp b/src/core/CL/kernels/CLHarrisCornersKernel.cpp index 2c344c7160..313d95fb03 100644 --- a/src/core/CL/kernels/CLHarrisCornersKernel.cpp +++ b/src/core/CL/kernels/CLHarrisCornersKernel.cpp @@ -59,7 +59,7 @@ void CLHarrisScoreKernel::configure(const ICLImage *input1, const ICLImage *inpu 
configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, block_size, norm_factor, strength_thresh, sensitivity, border_undefined); } -void CLHarrisScoreKernel::configure(CLCompileContext &compile_context, const ICLImage *input1, const ICLImage *input2, ICLImage *output, +void CLHarrisScoreKernel::configure(const CLCompileContext &compile_context, const ICLImage *input1, const ICLImage *input2, ICLImage *output, int32_t block_size, float norm_factor, float strength_thresh, float sensitivity, bool border_undefined) { diff --git a/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp index 8d9e1b9f9d..5c0eb2a606 100644 --- a/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp +++ b/src/core/CL/kernels/CLHeightConcatenateLayerKernel.cpp @@ -94,7 +94,7 @@ void CLHeightConcatenateLayerKernel::configure(const ICLTensor *input, unsigned configure(CLKernelLibrary::get().get_compile_context(), input, height_offset, output); } -void CLHeightConcatenateLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, unsigned int height_offset, ICLTensor *output) +void CLHeightConcatenateLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, unsigned int height_offset, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), height_offset, output->info())); diff --git a/src/core/CL/kernels/CLHistogramKernel.cpp b/src/core/CL/kernels/CLHistogramKernel.cpp index 5c44f6eec5..f16fa8c9fb 100644 --- a/src/core/CL/kernels/CLHistogramKernel.cpp +++ b/src/core/CL/kernels/CLHistogramKernel.cpp @@ -56,7 +56,7 @@ void CLHistogramKernel::configure(const ICLImage *input, ICLDistribution1D *outp configure(CLKernelLibrary::get().get_compile_context(), input, output); } -void CLHistogramKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output) +void CLHistogramKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output) { ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input); ARM_COMPUTE_ERROR_ON(nullptr == output); @@ -166,7 +166,7 @@ void CLHistogramBorderKernel::configure(const ICLImage *input, ICLDistribution1D configure(CLKernelLibrary::get().get_compile_context(), input, output); } -void CLHistogramBorderKernel::configure(CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output) +void CLHistogramBorderKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, ICLDistribution1D *output) { ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input); ARM_COMPUTE_ERROR_ON(nullptr == output); diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp index b24d2509d1..078aad2356 100644 --- a/src/core/CL/kernels/CLIm2ColKernel.cpp +++ b/src/core/CL/kernels/CLIm2ColKernel.cpp @@ -298,7 +298,7 @@ void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const configure(CLKernelLibrary::get().get_compile_context(), input, output, kernel_dims, conv_info, has_bias, dilation, num_groups); } -void CLIm2ColKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, +void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const 
Size2D &dilation, unsigned int num_groups) { diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp index 62a0485eff..0eb2c50e6f 100644 --- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp @@ -80,7 +80,7 @@ void CLInstanceNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor * configure(CLKernelLibrary::get().get_compile_context(), input, output, info); } -void CLInstanceNormalizationLayerKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info) +void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input); diff --git a/src/core/CL/kernels/CLIntegralImageKernel.cpp b/src/core/CL/kernels/CLIntegralImageKernel.cpp index 415531d85c..4c3445d1ae 100644 --- a/src/core/CL/kernels/CLIntegralImageKernel.cpp +++ b/src/core/CL/kernels/CLIntegralImageKernel.cpp @@ -42,7 +42,7 @@ void CLIntegralImageHorKernel::configure(const ICLTensor *input, ICLTensor *outp configure(CLKernelLibrary::get().get_compile_context(), input, output); } -void CLIntegralImageHorKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) +void CLIntegralImageHorKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32); @@ -93,7 +93,7 @@ void CLIntegralImageVertKernel::configure(ICLTensor *in_out) configure(CLKernelLibrary::get().get_compile_context(), in_out); } -void CLIntegralImageVertKernel::configure(CLCompileContext &compile_context, ICLTensor *in_out) +void CLIntegralImageVertKernel::configure(const CLCompileContext &compile_context, ICLTensor *in_out) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(in_out, 1, DataType::U32); diff --git a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp index 1817d15d3e..e04950d0a2 100644 --- a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp +++ b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp @@ -100,7 +100,7 @@ void CLL2NormalizeLayerKernel::configure(const ICLTensor *input, const ICLTensor configure(CLKernelLibrary::get().get_compile_context(), input, sum, output, axis, epsilon); } -void CLL2NormalizeLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon) +void CLL2NormalizeLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, int axis, float epsilon) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), sum->info(), output->info(), axis, epsilon)); diff --git a/src/core/CL/kernels/CLLKTrackerKernel.cpp b/src/core/CL/kernels/CLLKTrackerKernel.cpp index 3a7c1b5b9e..a2948d38fe 100644 --- a/src/core/CL/kernels/CLLKTrackerKernel.cpp +++ b/src/core/CL/kernels/CLLKTrackerKernel.cpp @@ -45,7 +45,7 @@ void CLLKTrackerInitKernel::configure(const ICLKeyPointArray *old_points, const configure(CLKernelLibrary::get().get_compile_context(), old_points, new_points_estimates, old_points_internal, 
new_points_internal, use_initial_estimate, level, num_levels, pyramid_scale); } -void CLLKTrackerInitKernel::configure(CLCompileContext &compile_context, const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates, +void CLLKTrackerInitKernel::configure(const CLCompileContext &compile_context, const ICLKeyPointArray *old_points, const ICLKeyPointArray *new_points_estimates, ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal, bool use_initial_estimate, size_t level, size_t num_levels, float pyramid_scale) @@ -98,7 +98,7 @@ void CLLKTrackerFinalizeKernel::configure(ICLLKInternalKeypointArray *new_points configure(CLKernelLibrary::get().get_compile_context(), new_points_internal, new_points); } -void CLLKTrackerFinalizeKernel::configure(CLCompileContext &compile_context, ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points) +void CLLKTrackerFinalizeKernel::configure(const CLCompileContext &compile_context, ICLLKInternalKeypointArray *new_points_internal, ICLKeyPointArray *new_points) { ARM_COMPUTE_ERROR_ON(new_points_internal == nullptr); @@ -140,7 +140,7 @@ void CLLKTrackerStage0Kernel::configure(const ICLTensor *old_input, const ICLTen configure(CLKernelLibrary::get().get_compile_context(), old_input, old_scharr_gx, old_scharr_gy, old_points_internal, new_points_internal, coeff_table, old_ival, window_dimension, level); } -void CLLKTrackerStage0Kernel::configure(CLCompileContext &compile_context, const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy, +void CLLKTrackerStage0Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *old_input, const ICLTensor *old_scharr_gx, const ICLTensor *old_scharr_gy, ICLLKInternalKeypointArray *old_points_internal, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival, size_t window_dimension, size_t level) @@ -236,7 +236,7 @@ void CLLKTrackerStage1Kernel::configure(const ICLTensor *new_input, ICLLKInterna configure(CLKernelLibrary::get().get_compile_context(), new_input, new_points_internal, coeff_table, old_ival, termination, epsilon, num_iterations, window_dimension, level); } -void CLLKTrackerStage1Kernel::configure(CLCompileContext &compile_context, const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, +void CLLKTrackerStage1Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *new_input, ICLLKInternalKeypointArray *new_points_internal, ICLCoefficientTableArray *coeff_table, ICLOldValArray *old_ival, Termination termination, float epsilon, size_t num_iterations, size_t window_dimension, size_t level) diff --git a/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp index fb750583c0..04ad754cbf 100644 --- a/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp +++ b/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp @@ -86,7 +86,7 @@ void CLLocallyConnectedMatrixMultiplyKernel::configure(const ICLTensor *input0, configure(CLKernelLibrary::get().get_compile_context(), input0, input1, output); } -void CLLocallyConnectedMatrixMultiplyKernel::configure(CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output) +void CLLocallyConnectedMatrixMultiplyKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, 
const ICLTensor *input1, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info())); diff --git a/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp b/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp index 2c28e030d2..88c10342f4 100644 --- a/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp +++ b/src/core/CL/kernels/CLMagnitudePhaseKernel.cpp @@ -50,7 +50,7 @@ void CLMagnitudePhaseKernel::configure(const ICLTensor *gx, const ICLTensor *gy, configure(CLKernelLibrary::get().get_compile_context(), gx, gy, magnitude, phase, mag_type, phase_type); } -void CLMagnitudePhaseKernel::configure(CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, +void CLMagnitudePhaseKernel::configure(const CLCompileContext &compile_context, const ICLTensor *gx, const ICLTensor *gy, ICLTensor *magnitude, ICLTensor *phase, MagnitudeType mag_type, PhaseType phase_type) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gx, 1, DataType::S16, DataType::S32); diff --git a/src/core/CL/kernels/CLMeanStdDevKernel.cpp b/src/core/CL/kernels/CLMeanStdDevKernel.cpp index 5a6630d5d4..de8b57ef17 100644 --- a/src/core/CL/kernels/CLMeanStdDevKernel.cpp +++ b/src/core/CL/kernels/CLMeanStdDevKernel.cpp @@ -68,7 +68,7 @@ void CLMeanStdDevKernel::configure(const ICLImage *input, float *mean, cl::Buffe configure(CLKernelLibrary::get().get_compile_context(), input, mean, global_sum, stddev, global_sum_squared); } -void CLMeanStdDevKernel::configure(CLCompileContext &compile_context, const ICLImage *input, float *mean, cl::Buffer *global_sum, float *stddev, cl::Buffer *global_sum_squared) +void CLMeanStdDevKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, float *mean, cl::Buffer *global_sum, float *stddev, cl::Buffer *global_sum_squared) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, global_sum); ARM_COMPUTE_ERROR_ON(stddev && nullptr == global_sum_squared); diff --git a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp index 11ef86e8c3..4230570ae0 100644 --- a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp +++ b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp @@ -88,7 +88,7 @@ void CLMeanStdDevNormalizationKernel::configure(ICLTensor *input, ICLTensor *out configure(CLKernelLibrary::get().get_compile_context(), input, output, epsilon); } -void CLMeanStdDevNormalizationKernel::configure(CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float epsilon) +void CLMeanStdDevNormalizationKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float epsilon) { ARM_COMPUTE_ERROR_ON_NULLPTR(input); diff --git a/src/core/CL/kernels/CLMedian3x3Kernel.cpp b/src/core/CL/kernels/CLMedian3x3Kernel.cpp index cfc9591584..3b1b6ada03 100644 --- a/src/core/CL/kernels/CLMedian3x3Kernel.cpp +++ b/src/core/CL/kernels/CLMedian3x3Kernel.cpp @@ -42,7 +42,7 @@ void CLMedian3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, boo configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined); } -void CLMedian3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) +void CLMedian3x3Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined) { 
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
diff --git a/src/core/CL/kernels/CLMemsetKernel.cpp b/src/core/CL/kernels/CLMemsetKernel.cpp
index 9b37cb81fd..08bb0a607e 100644
--- a/src/core/CL/kernels/CLMemsetKernel.cpp
+++ b/src/core/CL/kernels/CLMemsetKernel.cpp
@@ -47,7 +47,7 @@ void CLMemsetKernel::configure(ICLTensor *tensor,
     configure(CLKernelLibrary::get().get_compile_context(), tensor, constant_value, window);
 }
 
-void CLMemsetKernel::configure(CLCompileContext &compile_context, ICLTensor *tensor,
+void CLMemsetKernel::configure(const CLCompileContext &compile_context, ICLTensor *tensor,
                                const PixelValue &constant_value,
                                Window           *window)
 {
diff --git a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
index c89bbcb320..2ff9196f13 100644
--- a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
@@ -91,7 +91,7 @@ void CLMinMaxLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
     configure(CLKernelLibrary::get().get_compile_context(), input, output);
 }
 
-void CLMinMaxLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLMinMaxLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
diff --git a/src/core/CL/kernels/CLMinMaxLocationKernel.cpp b/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
index 77c945bed1..dfa0555331 100644
--- a/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
+++ b/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
@@ -65,7 +65,7 @@ void CLMinMaxKernel::configure(const ICLImage *input, cl::Buffer *min_max)
     configure(CLKernelLibrary::get().get_compile_context(), input, min_max);
 }
 
-void CLMinMaxKernel::configure(CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max)
+void CLMinMaxKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
@@ -177,7 +177,7 @@ void CLMinMaxLocationKernel::configure(const ICLImage *input, cl::Buffer *min_ma
     configure(CLKernelLibrary::get().get_compile_context(), input, min_max, min_max_count, min_loc, max_loc);
 }
 
-void CLMinMaxLocationKernel::configure(CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max, cl::Buffer *min_max_count, ICLCoordinates2DArray *min_loc,
+void CLMinMaxLocationKernel::configure(const CLCompileContext &compile_context, const ICLImage *input, cl::Buffer *min_max, cl::Buffer *min_max_count, ICLCoordinates2DArray *min_loc,
                                        ICLCoordinates2DArray *max_loc)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S16, DataType::F32);
diff --git a/src/core/CL/kernels/CLNonLinearFilterKernel.cpp b/src/core/CL/kernels/CLNonLinearFilterKernel.cpp
index 01b8733ab8..5066c3b16a 100644
--- a/src/core/CL/kernels/CLNonLinearFilterKernel.cpp
+++ b/src/core/CL/kernels/CLNonLinearFilterKernel.cpp
@@ -60,7 +60,7 @@ void CLNonLinearFilterKernel::configure(const ICLTensor *input, ICLTensor *outpu
     configure(CLKernelLibrary::get().get_compile_context(), input, output, function, mask_size, pattern, mask, border_undefined);
 }
 
-void CLNonLinearFilterKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NonLinearFilterFunction function,
+void CLNonLinearFilterKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NonLinearFilterFunction function,
                                         unsigned int mask_size, MatrixPattern pattern, const uint8_t *mask,
                                         bool border_undefined)
 {
diff --git a/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp b/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp
index dd6aa1ea8f..7de7735f0c 100644
--- a/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.cpp
@@ -46,7 +46,7 @@ void CLNonMaximaSuppression3x3Kernel::configure(const ICLTensor *input, ICLTenso
     configure(CLKernelLibrary::get().get_compile_context(), input, output, border_undefined);
 }
 
-void CLNonMaximaSuppression3x3Kernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
+void CLNonMaximaSuppression3x3Kernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, bool border_undefined)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F32);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::F32);
diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
index 6284a6acb4..7c8c23238d 100644
--- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
@@ -110,7 +110,7 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *ou
     configure(CLKernelLibrary::get().get_compile_context(), input, output, norm_info);
 }
 
-void CLNormalizationLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
+void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
index d46581e4dc..2ca77161ea 100644
--- a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
@@ -100,7 +100,7 @@ void CLNormalizePlanarYUVLayerKernel::configure(const ICLTensor *input, ICLTenso
     configure(CLKernelLibrary::get().get_compile_context(), input, output, mean, std);
 }
 
-void CLNormalizePlanarYUVLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std)
+void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std)
 {
     // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, mean, std);
diff --git a/src/core/CL/kernels/CLPadLayerKernel.cpp b/src/core/CL/kernels/CLPadLayerKernel.cpp
index 764e2a41e7..82508ec8ba 100644
--- a/src/core/CL/kernels/CLPadLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPadLayerKernel.cpp
@@ -101,7 +101,7 @@ void CLPadLayerKernel::configure(const ICLTensor *input, ICLTensor *output, cons
     configure(CLKernelLibrary::get().get_compile_context(), input, output, padding, constant_value, mode);
 }
 
-void CLPadLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
+void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
 {
     // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLPermuteKernel.cpp b/src/core/CL/kernels/CLPermuteKernel.cpp
index 3f1f870802..bf8425c026 100644
--- a/src/core/CL/kernels/CLPermuteKernel.cpp
+++ b/src/core/CL/kernels/CLPermuteKernel.cpp
@@ -80,7 +80,7 @@ void CLPermuteKernel::configure(const ICLTensor *input, ICLTensor *output, const
     configure(CLKernelLibrary::get().get_compile_context(), input, output, perm);
 }
 
-void CLPermuteKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PermutationVector &perm)
+void CLPermuteKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PermutationVector &perm)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), perm));
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
index 49f5e04433..585715a6e6 100644
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
+++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
@@ -148,7 +148,7 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
     configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
 }
 
-void CLPixelWiseMultiplicationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
+void CLPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
                                                 ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
@@ -379,7 +379,7 @@ void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1,
     configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info);
 }
 
-void CLComplexPixelWiseMultiplicationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
+void CLComplexPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info(), act_info));
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 43b8f85c39..cf1d7dd8dd 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -181,7 +181,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
     configure(CLKernelLibrary::get().get_compile_context(), input, output, pool_info, indices);
 }
 
-void CLPoolingLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
+void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
diff --git a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
index 9f930c54c2..07f669af62 100644
--- a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp
@@ -105,7 +105,7 @@ void CLPriorBoxLayerKernel::configure(const ICLTensor *input1, const ICLTensor *
     configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, info, min, max, aspect_ratios);
 }
 
-void CLPriorBoxLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min,
+void CLPriorBoxLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const PriorBoxLayerInfo &info, cl::Buffer *min,
                                       cl::Buffer *max, cl::Buffer *aspect_ratios)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
diff --git a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp
index 187c517088..b9767e8ec2 100644
--- a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp
+++ b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp
@@ -78,7 +78,7 @@ CLQLSTMLayerNormalizationKernel::CLQLSTMLayerNormalizationKernel()
 {
 }
 
-void CLQLSTMLayerNormalizationKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *weight, const ICLTensor *bias)
+void CLQLSTMLayerNormalizationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *weight, const ICLTensor *bias)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);
diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
index e017946673..b4b2217391 100644
--- a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
@@ -84,7 +84,7 @@ void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *out
     configure(CLKernelLibrary::get().get_compile_context(), input, output);
 }
 
-void CLQuantizationLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
+void CLQuantizationLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
diff --git a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
index cc1af52342..de99223bbc 100644
--- a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
+++ b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
@@ -108,7 +108,7 @@ void CLROIAlignLayerKernel::configure(const ICLTensor *input, const ICLTensor *r
     configure(CLKernelLibrary::get().get_compile_context(), input, rois, output, pool_info);
 }
 
-void CLROIAlignLayerKernel::configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
+void CLROIAlignLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, rois);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), rois->info(), output->info(), pool_info));
--
cgit v1.2.1