From 2eb5d16b839cbc28c6cb7f0de7a0bf15290b425a Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 2 Jul 2021 09:01:49 +0100
Subject: Align kernel/operator header layout

- Redirect validate documentation to configure
- Align header names
- Align class layout

Signed-off-by: Georgios Pinitas
Change-Id: Ia40f67383826a66e9f9a33745d66805551e31a3a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5897
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
---
 arm_compute/runtime/CL/functions/CLCrop.h | 6 +--
 src/core/cpu/kernels/CpuAddKernel.h | 6 +--
 src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp | 5 ---
 src/core/cpu/kernels/CpuConcatenateBatchKernel.h | 21 ++++------
 src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp | 5 ---
 src/core/cpu/kernels/CpuConcatenateDepthKernel.h | 18 ++++-----
 .../cpu/kernels/CpuConcatenateHeightKernel.cpp | 5 ---
 src/core/cpu/kernels/CpuConcatenateHeightKernel.h | 16 ++++----
 src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp | 5 ---
 src/core/cpu/kernels/CpuConcatenateWidthKernel.h | 16 ++++----
 .../CpuConvertFullyConnectedWeightsKernel.cpp | 5 ---
 .../CpuConvertFullyConnectedWeightsKernel.h | 20 ++++-----
 src/core/cpu/kernels/CpuCopyKernel.h | 6 +--
 .../cpu/kernels/CpuDepthwiseConv2dNativeKernel.cpp | 10 ++---
 .../cpu/kernels/CpuDepthwiseConv2dNativeKernel.h | 28 ++++++-------
 src/core/cpu/kernels/CpuDequantizeKernel.h | 1 -
 src/core/cpu/kernels/CpuDirectConv2dKernel.h | 6 +--
 .../cpu/kernels/CpuDirectConv2dOutputStageKernel.h | 8 ++--
 src/core/cpu/kernels/CpuElementwiseKernel.h | 5 ---
 src/core/cpu/kernels/CpuElementwiseUnaryKernel.cpp | 10 ++---
 src/core/cpu/kernels/CpuElementwiseUnaryKernel.h | 23 +++-------
 src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h | 3 +-
 .../CpuGemmLowpQuantizeDownInt32ScaleKernel.h | 8 ++--
 ...antizeDownInt32ToInt16ScaleByFixedPointKernel.h | 8 ++--
 ...uantizeDownInt32ToInt8ScaleByFixedPointKernel.h | 8 ++--
 ...antizeDownInt32ToUint8ScaleByFixedPointKernel.h | 8 ++--
 src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h | 1 -
 src/core/cpu/kernels/CpuGemmMatrixMultiplyKernel.h | 3 +-
 src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h | 3 +-
 src/core/cpu/kernels/CpuMulKernel.h | 2 -
 src/core/cpu/kernels/CpuPermuteKernel.h | 8 +---
 src/core/cpu/kernels/CpuPool2dKernel.h | 3 +-
 src/core/cpu/kernels/CpuQuantizeKernel.h | 1 -
 src/core/cpu/kernels/CpuReshapeKernel.h | 5 +--
 src/core/cpu/kernels/CpuScaleKernel.h | 3 +-
 src/core/cpu/kernels/CpuSoftmaxKernel.h | 8 ++--
 src/core/cpu/kernels/CpuTransposeKernel.h | 5 +--
 src/core/gpu/cl/kernels/ClActivationKernel.cpp | 1 -
 src/core/gpu/cl/kernels/ClActivationKernel.h | 9 ++---
 .../gpu/cl/kernels/ClBatchConcatenateKernel.cpp | 1 -
 src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h | 9 ++---
 .../kernels/ClConvertFullyConnectedWeightsKernel.h | 17 ++++----
 src/core/gpu/cl/kernels/ClCopyKernel.h | 8 ++--
 src/core/gpu/cl/kernels/ClCropKernel.cpp | 5 ---
 src/core/gpu/cl/kernels/ClCropKernel.h | 32 +++-------------
 src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h | 7 +---
 src/core/gpu/cl/kernels/ClDequantizeKernel.h | 1 -
 src/core/gpu/cl/kernels/ClDirectConv2dKernel.h | 2 +-
 src/core/gpu/cl/kernels/ClElementwiseKernel.cpp | 9 -----
 src/core/gpu/cl/kernels/ClElementwiseKernel.h | 47 +++++-----------------
 src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h | 6 +--
 src/core/gpu/cl/kernels/ClFillKernel.cpp | 7 ----
 src/core/gpu/cl/kernels/ClFillKernel.h | 15 ++-----
 src/core/gpu/cl/kernels/ClFloorKernel.h | 6 +--
 .../kernels/ClGemmLowpMatrixMultiplyNativeKernel.h | 2 +-
 .../ClGemmLowpMatrixMultiplyReshapedKernel.h | 2 +-
 .../cl/kernels/ClGemmMatrixMultiplyNativeKernel.h | 2 +-
 .../gpu/cl/kernels/ClHeightConcatenateKernel.h | 13 +++---
 src/core/gpu/cl/kernels/ClMulKernel.h | 2 -
 src/core/gpu/cl/kernels/ClPermuteKernel.h | 11 ++---
 src/core/gpu/cl/kernels/ClPool2dKernel.cpp | 1 -
 src/core/gpu/cl/kernels/ClPool2dKernel.h | 9 ++---
 src/core/gpu/cl/kernels/ClQuantizeKernel.h | 1 -
 src/core/gpu/cl/kernels/ClReshapeKernel.h | 8 ++--
 src/core/gpu/cl/kernels/ClScaleKernel.h | 16 ++------
 src/core/gpu/cl/kernels/ClSoftmaxKernel.h | 22 ++++------
 src/core/gpu/cl/kernels/ClTransposeKernel.h | 7 ++--
 .../cl/kernels/ClWidthConcatenate2TensorsKernel.h | 7 +---
 .../cl/kernels/ClWidthConcatenate4TensorsKernel.h | 9 +----
 src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h | 13 +++---
 .../cl/kernels/ClWinogradFilterTransformKernel.h | 3 +-
 .../cl/kernels/ClWinogradInputTransformKernel.cpp | 1 -
 .../cl/kernels/ClWinogradInputTransformKernel.h | 13 +++---
 .../cl/kernels/ClWinogradOutputTransformKernel.h | 4 +-
 src/runtime/cpu/operators/CpuActivation.h | 8 +---
 src/runtime/cpu/operators/CpuAdd.h | 10 +----
 src/runtime/cpu/operators/CpuCast.h | 2 -
 src/runtime/cpu/operators/CpuConcatenate.cpp | 5 ---
 src/runtime/cpu/operators/CpuConcatenate.h | 19 +++------
 .../operators/CpuConvertFullyConnectedWeights.h | 15 +++---
 src/runtime/cpu/operators/CpuCopy.h | 8 +---
 src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp | 17 --------
 src/runtime/cpu/operators/CpuDepthwiseConv2d.h | 21 +++++-----
 .../operators/CpuDepthwiseConv2dAssemblyDispatch.h | 3 +-
 src/runtime/cpu/operators/CpuDequantize.h | 2 -
 src/runtime/cpu/operators/CpuDirectConv2d.h | 2 -
 src/runtime/cpu/operators/CpuElementwise.h | 33 +++++----------
 src/runtime/cpu/operators/CpuElementwiseUnary.h | 4 +-
 src/runtime/cpu/operators/CpuFill.h | 2 -
 src/runtime/cpu/operators/CpuFlatten.h | 2 -
 src/runtime/cpu/operators/CpuFloor.h | 7 +---
 src/runtime/cpu/operators/CpuGemmDirectConv2d.h | 2 -
 src/runtime/cpu/operators/CpuGemmLowpOutputStage.h | 10 ++---
 src/runtime/cpu/operators/CpuMul.h | 4 --
 src/runtime/cpu/operators/CpuPermute.h | 10 +----
 src/runtime/cpu/operators/CpuPool2d.h | 2 -
 src/runtime/cpu/operators/CpuQuantize.h | 2 -
 src/runtime/cpu/operators/CpuReshape.h | 8 +---
 src/runtime/cpu/operators/CpuScale.cpp | 18 ++++-----
 src/runtime/cpu/operators/CpuScale.h | 16 +++-----
 src/runtime/cpu/operators/CpuSoftmax.h | 10 +----
 src/runtime/cpu/operators/CpuSub.h | 21 +---------
 src/runtime/cpu/operators/CpuTranspose.h | 7 +---
 src/runtime/gpu/cl/operators/ClActivation.h | 8 +---
 src/runtime/gpu/cl/operators/ClAdd.h | 24 +----------
 src/runtime/gpu/cl/operators/ClCast.h | 2 -
 src/runtime/gpu/cl/operators/ClConcatenate.cpp | 7 ----
 src/runtime/gpu/cl/operators/ClConcatenate.h | 19 +++------
 .../cl/operators/ClConvertFullyConnectedWeights.h | 15 +++---
 src/runtime/gpu/cl/operators/ClCopy.h | 8 +---
 src/runtime/gpu/cl/operators/ClCrop.h | 21 +++------
 src/runtime/gpu/cl/operators/ClDequantize.h | 2 -
 src/runtime/gpu/cl/operators/ClDirectConv2d.h | 1 -
 .../gpu/cl/operators/ClElementwiseOperations.h | 45 +++++----------------
 src/runtime/gpu/cl/operators/ClElementwiseUnary.h | 45 +++++++--------------
 src/runtime/gpu/cl/operators/ClFill.h | 8 +---
 src/runtime/gpu/cl/operators/ClFlatten.h | 2 -
 src/runtime/gpu/cl/operators/ClFloor.h | 7 +---
 src/runtime/gpu/cl/operators/ClLogicalNot.h | 5 +--
 src/runtime/gpu/cl/operators/ClMul.h | 4 --
 src/runtime/gpu/cl/operators/ClPRelu.h | 8 +---
 src/runtime/gpu/cl/operators/ClPermute.h | 10 +----
 src/runtime/gpu/cl/operators/ClQuantize.h | 2 -
 src/runtime/gpu/cl/operators/ClReshape.h | 8 +---
 src/runtime/gpu/cl/operators/ClScale.h | 10 ++---
 src/runtime/gpu/cl/operators/ClSoftmax.h | 8 ++--
 src/runtime/gpu/cl/operators/ClSub.h | 24 +----------
 src/runtime/gpu/cl/operators/ClTranspose.h | 7 +---
 128 files changed, 335 insertions(+), 865 deletions(-)

diff --git a/arm_compute/runtime/CL/functions/CLCrop.h b/arm_compute/runtime/CL/functions/CLCrop.h
index d2b72a5eff..a474215190 100644
--- a/arm_compute/runtime/CL/functions/CLCrop.h
+++ b/arm_compute/runtime/CL/functions/CLCrop.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CL_CROP_H
-#define ARM_COMPUTE_CL_CROP_H
+#ifndef ARM_COMPUTE_CLCROP_H
+#define ARM_COMPUTE_CLCROP_H
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Window.h"
@@ -111,4 +111,4 @@ private:
     std::unique_ptr _impl;
 };
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_CROP_H */
+#endif /*ARM_COMPUTE_CLCROP_H */
diff --git a/src/core/cpu/kernels/CpuAddKernel.h b/src/core/cpu/kernels/CpuAddKernel.h
index 3ebaa462ee..717d0132c6 100644
--- a/src/core/cpu/kernels/CpuAddKernel.h
+++ b/src/core/cpu/kernels/CpuAddKernel.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CPUADDKERNEL_H
-#define ARM_COMPUTE_CPUADDKERNEL_H
+#ifndef ARM_COMPUTE_CPU_ADD_KERNEL_H
+#define ARM_COMPUTE_CPU_ADD_KERNEL_H
 
 #include "src/core/common/Macros.h"
 #include "src/core/cpu/ICpuKernel.h"
@@ -84,4 +84,4 @@ private:
 } // namespace kernels
 } // namespace cpu
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_CPUADDKERNEL_H */
+#endif /* ARM_COMPUTE_CPU_ADD_KERNEL_H */
diff --git a/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp b/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
index 5df5ac3dd0..16c0efc793 100644
--- a/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
+++ b/src/core/cpu/kernels/CpuConcatenateBatchKernel.cpp
@@ -146,11 +146,6 @@ Status validate_arguments(const ITensorInfo *src, unsigned int batch_offset, con
 }
 } // namespace
 
-CpuConcatenateBatchKernel::CpuConcatenateBatchKernel()
-    : _func(nullptr), _batch_offset(0)
-{
-}
-
 void CpuConcatenateBatchKernel::configure(const ITensorInfo *src, unsigned int batch_offset, ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
diff --git a/src/core/cpu/kernels/CpuConcatenateBatchKernel.h b/src/core/cpu/kernels/CpuConcatenateBatchKernel.h
index 99e8d84d99..1706926fa8 100644
--- a/src/core/cpu/kernels/CpuConcatenateBatchKernel.h
+++ b/src/core/cpu/kernels/CpuConcatenateBatchKernel.h
@@ -21,17 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
*/ -#ifndef ARM_COMPUTE_CPU_CONCATENATEBATCH_KERNEL_H -#define ARM_COMPUTE_CPU_CONCATENATEBATCH_KERNEL_H +#ifndef ARM_COMPUTE_CPU_CONCATENATE_BATCH_KERNEL_H +#define ARM_COMPUTE_CPU_CONCATENATE_BATCH_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" namespace arm_compute { -// Forward declarations -class ITensor; - namespace cpu { namespace kernels @@ -42,7 +39,7 @@ namespace kernels class CpuConcatenateBatchKernel : public ICpuKernel { public: - CpuConcatenateBatchKernel(); + CpuConcatenateBatchKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuConcatenateBatchKernel); /** Configure kernel for a given list of arguments * @@ -51,11 +48,9 @@ public: * @param[in,out] dst Destination tensor info. Data types supported: Same as @p src. */ void configure(const ITensorInfo *src, unsigned int batch_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuConcatenateBatchKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: All. - * @param[in] batch_offset The offset on axis # 3. - * @param[in] dst Destination tensor info. Data types supported: Same as @p src. + * Similar to @ref CpuConcatenateBatchKernel::configure() * * @return a status */ @@ -69,10 +64,10 @@ private: using BatchConcatFunction = void(const ITensor *, ITensor *, unsigned int, const Window &); private: - BatchConcatFunction *_func; - unsigned int _batch_offset; + BatchConcatFunction *_func{ nullptr }; + unsigned int _batch_offset{ 0 }; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_CONCATENATEBATCH_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_CONCATENATE_BATCH_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp b/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp index a7e5cd8c60..133499deb6 100644 --- a/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp +++ b/src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp @@ -146,11 +146,6 @@ Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, c } } // namespace -CpuConcatenateDepthKernel::CpuConcatenateDepthKernel() - : _func(nullptr), _depth_offset(0) -{ -} - void CpuConcatenateDepthKernel::configure(const ITensorInfo *src, unsigned int depth_offset, ITensorInfo *dst) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); diff --git a/src/core/cpu/kernels/CpuConcatenateDepthKernel.h b/src/core/cpu/kernels/CpuConcatenateDepthKernel.h index af89c2464f..3ec19a86d1 100644 --- a/src/core/cpu/kernels/CpuConcatenateDepthKernel.h +++ b/src/core/cpu/kernels/CpuConcatenateDepthKernel.h @@ -22,8 +22,8 @@ * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CPU_CONCATENATEDEPTH_KERNEL_H -#define ARM_COMPUTE_CPU_CONCATENATEDEPTH_KERNEL_H +#ifndef ARM_COMPUTE_CPU_CONCATENATE_DEPTH_KERNEL_H +#define ARM_COMPUTE_CPU_CONCATENATE_DEPTH_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" @@ -43,7 +43,7 @@ namespace kernels class CpuConcatenateDepthKernel : public ICpuKernel { public: - CpuConcatenateDepthKernel(); + CpuConcatenateDepthKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuConcatenateDepthKernel); /** Configure kernel for a given list of arguments * @@ -56,11 +56,9 @@ public: * */ void configure(const ITensorInfo *src, unsigned int depth_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuConcatenateDepthKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[in] depth_offset The offset on the Z axis. - * @param[in] dst Destination tensor info. Data types supported: Same as @p src. + * Similar to @ref CpuConcatenateDepthKernel::configure() * * @return a status */ @@ -74,10 +72,10 @@ private: using DepthConcatFunction = void(const ITensor *, ITensor *, unsigned int, const Window &); private: - DepthConcatFunction *_func; - unsigned int _depth_offset; + DepthConcatFunction *_func{ nullptr }; + unsigned int _depth_offset{ 0 }; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_CONCATENATEDEPTH_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_CONCATENATE_DEPTH_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp b/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp index 54b972662b..dfd442b10a 100644 --- a/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp +++ b/src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp @@ -62,11 +62,6 @@ Status validate_arguments(const ITensorInfo *src, unsigned int height_offset, co } } // namespace -CpuConcatenateHeightKernel::CpuConcatenateHeightKernel() - : _height_offset(0) -{ -} - void CpuConcatenateHeightKernel::configure(const ITensorInfo *src, unsigned int height_offset, ITensorInfo *dst) { ARM_COMPUTE_UNUSED(src); diff --git a/src/core/cpu/kernels/CpuConcatenateHeightKernel.h b/src/core/cpu/kernels/CpuConcatenateHeightKernel.h index 609bb21da7..e5e15e1aee 100644 --- a/src/core/cpu/kernels/CpuConcatenateHeightKernel.h +++ b/src/core/cpu/kernels/CpuConcatenateHeightKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CPU_CONCATENATEHEIGHT_KERNEL_H -#define ARM_COMPUTE_CPU_CONCATENATEHEIGHT_KERNEL_H +#ifndef ARM_COMPUTE_CPU_CONCATENATE_HEIGHT_KERNEL_H +#define ARM_COMPUTE_CPU_CONCATENATE_HEIGHT_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" @@ -39,7 +39,7 @@ namespace kernels class CpuConcatenateHeightKernel : public ICpuKernel { public: - CpuConcatenateHeightKernel(); + CpuConcatenateHeightKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuConcatenateHeightKernel); /** Configure kernel for a given list of arguments * @@ -49,11 +49,9 @@ public: * */ void configure(const ITensorInfo *src, unsigned int height_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuConcatenateHeightKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: All - * @param[in] height_offset The starting offset on the Y axis for the output tensor. - * @param[in] dst Destination tensor info. Data types supported: Same as @p src. + * Similar to @ref CpuConcatenateHeightKernel::configure() * * @return a status */ @@ -64,9 +62,9 @@ public: const char *name() const override; private: - unsigned int _height_offset; + unsigned int _height_offset{ 0 }; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_CONCATENATEHEIGHT_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_CONCATENATE_HEIGHT_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp b/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp index effcbc336c..ad33b0c951 100644 --- a/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp +++ b/src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp @@ -62,11 +62,6 @@ Status validate_arguments(const ITensorInfo *src, unsigned int width_offset, con } } // namespace -CpuConcatenateWidthKernel::CpuConcatenateWidthKernel() - : _width_offset(0) -{ -} - void CpuConcatenateWidthKernel::configure(const ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); diff --git a/src/core/cpu/kernels/CpuConcatenateWidthKernel.h b/src/core/cpu/kernels/CpuConcatenateWidthKernel.h index afdc3ccddd..f64191e173 100644 --- a/src/core/cpu/kernels/CpuConcatenateWidthKernel.h +++ b/src/core/cpu/kernels/CpuConcatenateWidthKernel.h @@ -22,8 +22,8 @@ * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_CONCATENATEWIDTH_KERNEL_H -#define ARM_COMPUTE_CPU_CONCATENATEWIDTH_KERNEL_H +#ifndef ARM_COMPUTE_CPU_CONCATENATE_WIDTH_KERNEL_H +#define ARM_COMPUTE_CPU_CONCATENATE_WIDTH_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" @@ -40,7 +40,7 @@ namespace kernels class CpuConcatenateWidthKernel : public ICPPKernel { public: - CpuConcatenateWidthKernel(); + CpuConcatenateWidthKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuConcatenateWidthKernel); /** Configure kernel for a given list of arguments * @@ -49,11 +49,9 @@ public: * @param[in,out] dst Destination tensor info. Data types supported: Same as @p src. */ void configure(const ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuConcatenateWidthKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: All - * @param[in] width_offset The offset on the X axis. 
- * @param[in] dst Destination tensor info. Data types supported: Same as @p src. + * Similar to @ref CpuConcatenateWidthKernel::configure() * * @return a status */ @@ -64,9 +62,9 @@ public: const char *name() const override; private: - unsigned int _width_offset; + unsigned int _width_offset{ 0 }; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_CONCATENATEWIDTH_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_CONCATENATE_WIDTH_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.cpp b/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.cpp index d91ee64ecf..5bf70dc9bf 100644 --- a/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.cpp +++ b/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.cpp @@ -34,11 +34,6 @@ namespace cpu { namespace kernels { -CpuConvertFullyConnectedWeightsKernel::CpuConvertFullyConnectedWeightsKernel() - : _factor1(0), _factor2(0) -{ -} - void CpuConvertFullyConnectedWeightsKernel::configure(const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_input_shape, DataLayout data_layout) diff --git a/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.h b/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.h index c867e3deeb..3ba3162c34 100644 --- a/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.h +++ b/src/core/cpu/kernels/CpuConvertFullyConnectedWeightsKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_KERNEL_H -#define ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_KERNEL_H +#ifndef ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_KERNEL_H +#define ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" @@ -44,8 +44,7 @@ namespace kernels class CpuConvertFullyConnectedWeightsKernel : public ICpuKernel { public: - /** Default constructor */ - CpuConvertFullyConnectedWeightsKernel(); + CpuConvertFullyConnectedWeightsKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuConvertFullyConnectedWeightsKernel); /** Set the src and dst tensor. * @@ -55,12 +54,9 @@ public: * @param[in] data_layout The data layout the weights have been trained in. */ void configure(const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_input_shape, DataLayout data_layout); - /** Static function to check if given info will lead to a valid configuration of @ref CpuConvertFullyConnectedWeightsKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source weights tensor info to convert. Must be 2 dimensional. Data types supported: All. - * @param[in] dst The converted weights tensor info. Shape and Data Type: Same as @p src. - * @param[in] original_input_shape Shape of the original src tensor (the one entering fully connected layer). - * @param[in] data_layout The data layout the weights have been trained in. 
+ * Similar to @ref CpuConvertFullyConnectedWeightsKernel::configure() * * @return a status */ @@ -71,8 +67,8 @@ public: const char *name() const override; private: - unsigned int _factor1; /* equals to the number of elements per original src plane if @p data_layout == NCHW; its number of channels otherwise */ - unsigned int _factor2; /* equals to the number of elements per original src plane if @p data_layout == NHWC; its number of channels otherwise */ + unsigned int _factor1{ 0 }; /* equals to the number of elements per original src plane if @p data_layout == NCHW; its number of channels otherwise */ + unsigned int _factor2{ 0 }; /* equals to the number of elements per original src plane if @p data_layout == NHWC; its number of channels otherwise */ /** Template function to run the permute * @@ -86,4 +82,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_KERNEL_H */ \ No newline at end of file +#endif /* ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_KERNEL_H */ \ No newline at end of file diff --git a/src/core/cpu/kernels/CpuCopyKernel.h b/src/core/cpu/kernels/CpuCopyKernel.h index 98b79a964c..e2f1ed60a6 100644 --- a/src/core/cpu/kernels/CpuCopyKernel.h +++ b/src/core/cpu/kernels/CpuCopyKernel.h @@ -46,11 +46,9 @@ public: * @param[in] padding (Optional) Padding to be applied to the input tensor */ void configure(const ITensorInfo *src, ITensorInfo *dst, const PaddingList &padding = PaddingList()); - /** Static function to check if given info will lead to a valid configuration of @ref CpuCopyKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor. Data types supported: All - * @param[in] dst Destination tensor. Data types supported: same as @p src. 
- * @param[in] padding (Optional) Padding to be applied to the input tensor + * Similar to @ref CpuCopyKernel::configure() * * @return a status */ diff --git a/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.cpp b/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.cpp index eac9baaf01..5530eba9f1 100644 --- a/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.cpp +++ b/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.cpp @@ -803,11 +803,6 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co } } // namespace -CpuDepthwiseConv2dNativeKernel::CpuDepthwiseConv2dNativeKernel() - : _func(), _conv_info(), _depth_multiplier(1), _dilation(), _output_multiplier(), _output_shift(), _has_biases() -{ -} - void CpuDepthwiseConv2dNativeKernel::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst); @@ -945,6 +940,11 @@ void CpuDepthwiseConv2dNativeKernel::run_op(ITensorPack &tensors, const Window & auto dst = tensors.get_tensor(TensorType::ACL_DST); (this->*_func)(src, weights, biases, dst, window, _has_biases); } + +const char *CpuDepthwiseConv2dNativeKernel::name() const +{ + return "CpuDepthwiseConv2dNativeKernel"; +} } // namespace kernels } // namespace cpu } // namespace arm_compute diff --git a/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h b/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h index 559c46dc93..eb7041f7b6 100644 --- a/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h +++ b/src/core/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_DEPTHWISECONV2DNATIVEKERNEL_H -#define ARM_COMPUTE_CPU_DEPTHWISECONV2DNATIVEKERNEL_H +#ifndef ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_NATIVE_KERNEL_H +#define ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_NATIVE_KERNEL_H #include "arm_compute/core/utils/misc/Traits.h" #include "src/core/common/Macros.h" @@ -43,12 +43,7 @@ namespace kernels class CpuDepthwiseConv2dNativeKernel : public ICpuKernel { public: - const char *name() const override - { - return "CpuDepthwiseConv2dNativeKernel"; - } - /** Default constructor */ - CpuDepthwiseConv2dNativeKernel(); + CpuDepthwiseConv2dNativeKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDepthwiseConv2dNativeKernel); /** Initialize the function's source, destination and parameters. 
@@ -75,6 +70,7 @@ public: // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override; + const char *name() const override; private: template @@ -95,15 +91,15 @@ private: */ using DepthwiseFunctionPtr = void (CpuDepthwiseConv2dNativeKernel::*)(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst, const Window &window, bool has_biases); - DepthwiseFunctionPtr _func; - PadStrideInfo _conv_info; - unsigned int _depth_multiplier; - Size2D _dilation; - std::vector _output_multiplier; - std::vector _output_shift; - bool _has_biases; + DepthwiseFunctionPtr _func{ nullptr }; + PadStrideInfo _conv_info{}; + unsigned int _depth_multiplier{ 1 }; + Size2D _dilation{}; + std::vector _output_multiplier{}; + std::vector _output_shift{}; + bool _has_biases{ false }; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_DEPTHWISECONV2DNATIVEKERNEL_H */ +#endif /* ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_NATIVE_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuDequantizeKernel.h b/src/core/cpu/kernels/CpuDequantizeKernel.h index 798f32cec7..e80aa3aaad 100644 --- a/src/core/cpu/kernels/CpuDequantizeKernel.h +++ b/src/core/cpu/kernels/CpuDequantizeKernel.h @@ -37,7 +37,6 @@ namespace kernels class CpuDequantizeKernel : public ICpuKernel { public: - /** Default constructor */ CpuDequantizeKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDequantizeKernel); /** Set input, output tensors. diff --git a/src/core/cpu/kernels/CpuDirectConv2dKernel.h b/src/core/cpu/kernels/CpuDirectConv2dKernel.h index 62ed96f255..9bef1c484a 100644 --- a/src/core/cpu/kernels/CpuDirectConv2dKernel.h +++ b/src/core/cpu/kernels/CpuDirectConv2dKernel.h @@ -21,15 +21,14 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_DIRECTCONV2D_KERNEL_H -#define ARM_COMPUTE_CPU_DIRECTCONV2D_KERNEL_H +#ifndef ARM_COMPUTE_CPU_DIRECT_CONV2D_KERNEL_H +#define ARM_COMPUTE_CPU_DIRECT_CONV2D_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" namespace arm_compute { -class ITensor; namespace cpu { namespace kernels @@ -38,7 +37,6 @@ namespace kernels class CpuDirectConv2dKernel : public ICpuKernel { public: - /** Default constructor */ CpuDirectConv2dKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDirectConv2dKernel); /** Set the src, weights, and dst tensors. diff --git a/src/core/cpu/kernels/CpuDirectConv2dOutputStageKernel.h b/src/core/cpu/kernels/CpuDirectConv2dOutputStageKernel.h index 62bc5d41c9..749411c0a7 100644 --- a/src/core/cpu/kernels/CpuDirectConv2dOutputStageKernel.h +++ b/src/core/cpu/kernels/CpuDirectConv2dOutputStageKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CPU_DIRECTCONV2D_OUTPUTSTAGE_KERNEL_H -#define ARM_COMPUTE_CPU_DIRECTCONV2D_OUTPUTSTAGE_KERNEL_H +#ifndef ARM_COMPUTE_CPU_DIRECT_CONV2D_OUTPUT_STAGE_KERNEL_H +#define ARM_COMPUTE_CPU_DIRECT_CONV2D_OUTPUT_STAGE_KERNEL_H #include "arm_compute/core/KernelDescriptors.h" #include "src/core/common/Macros.h" @@ -30,7 +30,6 @@ namespace arm_compute { -class ITensor; namespace cpu { namespace kernels @@ -44,7 +43,6 @@ namespace kernels class CpuDirectConv2dOutputStageKernel : public ICpuKernel { public: - /** Default constructor */ CpuDirectConv2dOutputStageKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDirectConv2dOutputStageKernel); /** Set the accumulate buffer and the biases of the kernel. @@ -84,4 +82,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_DIRECTCONV2D_OUTPUTSTAGE_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_DIRECT_CONV2D_OUTPUT_STAGE_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuElementwiseKernel.h b/src/core/cpu/kernels/CpuElementwiseKernel.h index 50c8d29ac5..75137da65d 100644 --- a/src/core/cpu/kernels/CpuElementwiseKernel.h +++ b/src/core/cpu/kernels/CpuElementwiseKernel.h @@ -29,7 +29,6 @@ namespace arm_compute { -class ITensor; namespace cpu { namespace kernels @@ -89,7 +88,6 @@ protected: class CpuArithmeticKernel : public CpuElementwiseKernel { public: - /** Default constructor */ CpuArithmeticKernel() = default; /** Configure kernel @@ -130,7 +128,6 @@ private: class CpuDivisionKernel : public CpuArithmeticKernel { public: - /** Default constructor */ CpuDivisionKernel() = default; /** Configure kernel @@ -157,7 +154,6 @@ protected: class CpuPowerKernel : public CpuArithmeticKernel { public: - /** Default constructor */ CpuPowerKernel() = default; /** Configure kernel @@ -184,7 +180,6 @@ protected: class CpuComparisonKernel : public CpuElementwiseKernel { public: - /** Default constructor */ CpuComparisonKernel() = default; /** Configure kernel diff --git a/src/core/cpu/kernels/CpuElementwiseUnaryKernel.cpp b/src/core/cpu/kernels/CpuElementwiseUnaryKernel.cpp index 91fa75ebaf..56e3297e73 100644 --- a/src/core/cpu/kernels/CpuElementwiseUnaryKernel.cpp +++ b/src/core/cpu/kernels/CpuElementwiseUnaryKernel.cpp @@ -105,11 +105,6 @@ const ElementwiseUnaryKernel *get_implementation(DataType dt) } } // namespace -CpuElementwiseUnaryKernel::CpuElementwiseUnaryKernel() - : _op() -{ -} - void CpuElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensorInfo &src, ITensorInfo &dst) { ARM_COMPUTE_ERROR_THROW_ON(validate(op, src, dst)); @@ -169,6 +164,11 @@ void CpuElementwiseUnaryKernel::run_op(ITensorPack &tensors, const Window &windo ARM_COMPUTE_ERROR_ON(func == nullptr); func(src, dst, window, _op); } + +const char *CpuElementwiseUnaryKernel::name() const +{ + return "CpuElementwiseUnaryKernel"; +} } // namespace kernels } // namespace cpu } // namespace arm_compute diff --git a/src/core/cpu/kernels/CpuElementwiseUnaryKernel.h b/src/core/cpu/kernels/CpuElementwiseUnaryKernel.h index ceb90dcf70..43c6eded60 100644 --- a/src/core/cpu/kernels/CpuElementwiseUnaryKernel.h +++ b/src/core/cpu/kernels/CpuElementwiseUnaryKernel.h @@ -30,7 +30,6 @@ namespace arm_compute { -class ITensor; namespace cpu { namespace kernels @@ -39,19 +38,11 @@ namespace kernels * * Element-wise operation is computed by: * @f[ dst(x) = OP(src(x))@f] - * */ class CpuElementwiseUnaryKernel : public ICpuKernel { public: - const char *name() const override - { - return "CpuElementwiseUnaryKernel"; - } - /** Default 
constructor */ - CpuElementwiseUnaryKernel(); - /** Default destructor */ - ~CpuElementwiseUnaryKernel() = default; + CpuElementwiseUnaryKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuElementwiseUnaryKernel); /** Function to configure the @ref CpuElementwiseUnaryKernel @@ -61,19 +52,17 @@ public: * @param[out] dst Output tensor. Data types supported: Same as @p src. */ void configure(ElementWiseUnary op, const ITensorInfo &src, ITensorInfo &dst); - - /** Static function to check if given info will lead to a valid configuration of @ref CpuElementwiseUnaryKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] op Arithmetic operation to be executed. - * @param[in] src First tensor input info. Data types supported: F16/F32, F16/F32/S32 for NEG/ABS operations. - * @param[in] dst Output tensor info. Data types supported: Same as @p src. + * Similar to CpuElementwiseUnaryKernel::configure() * - * @return a Status + * @return a status */ static Status validate(ElementWiseUnary op, const ITensorInfo &src, const ITensorInfo &dst); // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override; + const char *name() const override; /** Common signature for all the specialised elementwise unary micro-kernels * @@ -82,7 +71,7 @@ public: using ElementwiseUnaryUkernelPtr = std::add_pointer::type; private: - ElementWiseUnary _op; + ElementWiseUnary _op{}; }; } // namespace kernels } // namespace cpu diff --git a/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h index 243aab9375..8f1a54314a 100644 --- a/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h +++ b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h @@ -55,7 +55,6 @@ namespace kernels class CpuGemmInterleave4x4Kernel : public ICpuKernel { public: - /** Default Constructor */ CpuGemmInterleave4x4Kernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmInterleave4x4Kernel); /** Initialise the kernel's src and dst. @@ -79,4 +78,4 @@ public: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H*/ +#endif /* ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h index 646242dc7e..f3cdbdc610 100644 --- a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h +++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32_SCALE_KERNEL_H -#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32_SCALE_KERNEL_H +#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32_SCALE_KERNEL_H +#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32_SCALE_KERNEL_H #include "arm_compute/core/KernelDescriptors.h" #include "src/core/common/Macros.h" @@ -30,6 +30,7 @@ namespace arm_compute { +// Forward declarations class ITensor; namespace cpu { @@ -53,7 +54,6 @@ namespace kernels class CpuGemmLowpQuantizeDownInt32ScaleKernel : public ICpuKernel { public: - /** Default constructor */ CpuGemmLowpQuantizeDownInt32ScaleKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ScaleKernel); /** Initialise the kernel's input and output. 
@@ -104,4 +104,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32_SCALE_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32_SCALE_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h index 4d743e9a0a..7a1197d2cf 100644 --- a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h +++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H -#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H +#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H +#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H #include "arm_compute/core/KernelDescriptors.h" #include "src/core/common/Macros.h" @@ -30,6 +30,7 @@ namespace arm_compute { +// Forward declaration class ITensor; namespace cpu { @@ -50,7 +51,6 @@ namespace kernels class CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel : public ICpuKernel { public: - /** Default constructor */ CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel); /** Initialise the kernel's input and output. @@ -108,4 +108,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h index a941f1f542..9ebb529990 100644 --- a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h +++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H -#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H +#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H +#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H #include "arm_compute/core/KernelDescriptors.h" #include "src/core/common/Macros.h" @@ -30,6 +30,7 @@ namespace arm_compute { +// Forward declaration class ITensor; namespace cpu { @@ -51,7 +52,6 @@ namespace kernels class CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel : public ICpuKernel { public: - /** Default constructor */ CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel); /** Initialise the kernel's input and output. 
@@ -111,4 +111,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h index 9b4c056419..312cad971b 100644 --- a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h +++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H -#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H +#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H +#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H #include "arm_compute/core/KernelDescriptors.h" #include "src/core/common/Macros.h" @@ -30,6 +30,7 @@ namespace arm_compute { +// Forward declaration class ITensor; namespace cpu { @@ -51,7 +52,6 @@ namespace kernels class CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel : public ICpuKernel { public: - /** Default constructor */ CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel); /** Initialise the kernel's input and output. @@ -105,4 +105,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h index c8e6fa9589..f9450b962b 100644 --- a/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h +++ b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h @@ -44,7 +44,6 @@ namespace kernels class CpuGemmMatrixAdditionKernel : public ICpuKernel { public: - /** Constructor */ CpuGemmMatrixAdditionKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmMatrixAdditionKernel); /** Initialise the kernel's input and output. diff --git a/src/core/cpu/kernels/CpuGemmMatrixMultiplyKernel.h b/src/core/cpu/kernels/CpuGemmMatrixMultiplyKernel.h index bf13342739..974ff85606 100644 --- a/src/core/cpu/kernels/CpuGemmMatrixMultiplyKernel.h +++ b/src/core/cpu/kernels/CpuGemmMatrixMultiplyKernel.h @@ -42,7 +42,6 @@ namespace kernels class CpuGemmMatrixMultiplyKernel : public ICpuKernel { public: - /** Constructor */ CpuGemmMatrixMultiplyKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmMatrixMultiplyKernel); /** Initialise the kernel's input and output. 
@@ -89,4 +88,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_GEMM_MATRIX_MULTIPLY_KERNEL_H*/ +#endif /* ARM_COMPUTE_CPU_GEMM_MATRIX_MULTIPLY_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h index c9c22bd27b..1a9287f7b0 100644 --- a/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h +++ b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h @@ -71,7 +71,6 @@ namespace kernels class CpuGemmTranspose1xWKernel : public ICpuKernel { public: - /** Constructor */ CpuGemmTranspose1xWKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmTranspose1xWKernel); /** Configure kernel for a given list of arguments @@ -95,4 +94,4 @@ public: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuMulKernel.h b/src/core/cpu/kernels/CpuMulKernel.h index 3e667bc4be..3ea176cc31 100644 --- a/src/core/cpu/kernels/CpuMulKernel.h +++ b/src/core/cpu/kernels/CpuMulKernel.h @@ -37,7 +37,6 @@ namespace kernels class CpuMulKernel : public ICpuKernel { public: - /** Default constructor */ CpuMulKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuMulKernel); /** Initialise the kernel's input, dst and border mode. @@ -122,7 +121,6 @@ private: class CpuComplexMulKernel : public ICpuKernel { public: - /** Default constructor */ CpuComplexMulKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuComplexMulKernel); /** Initialise the kernel's src, dst and border mode. diff --git a/src/core/cpu/kernels/CpuPermuteKernel.h b/src/core/cpu/kernels/CpuPermuteKernel.h index 9c59d5b9d4..2955f38960 100644 --- a/src/core/cpu/kernels/CpuPermuteKernel.h +++ b/src/core/cpu/kernels/CpuPermuteKernel.h @@ -48,13 +48,9 @@ public: * @param[in] perm Permutation vector */ void configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm); - /** Static function to check if given info will lead to a valid configuration of @ref CpuPermuteKernel + /** Static function to check if given info will lead to a valid configuration * - * @note Arbitrary permutation vectors are supported with rank not greater than 4 - * - * @param[in] src Source tensor to permute. Data types supported: All - * @param[in] dst Destination tensor. 
Data types supported: Same as @p src - * @param[in] perm Permutation vector + * Similar to @ref CpuPermuteKernel::configure() * * @return a status */ diff --git a/src/core/cpu/kernels/CpuPool2dKernel.h b/src/core/cpu/kernels/CpuPool2dKernel.h index ff7d7bb21d..9ed398b907 100644 --- a/src/core/cpu/kernels/CpuPool2dKernel.h +++ b/src/core/cpu/kernels/CpuPool2dKernel.h @@ -38,7 +38,6 @@ namespace kernels class CpuPool2dKernel : public ICpuKernel { public: - /** Default constructor */ CpuPool2dKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuPool2dKernel); /** Configure kernel for a given list of arguments @@ -80,4 +79,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_POOL2D_KERNEL_H */ +#endif /* ARM_COMPUTE_CPU_POOL2D_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuQuantizeKernel.h b/src/core/cpu/kernels/CpuQuantizeKernel.h index d3422d3fbd..834a2e03d2 100644 --- a/src/core/cpu/kernels/CpuQuantizeKernel.h +++ b/src/core/cpu/kernels/CpuQuantizeKernel.h @@ -40,7 +40,6 @@ namespace kernels class CpuQuantizeKernel : public ICpuKernel { public: - /** Default constructor */ CpuQuantizeKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuQuantizeKernel); /** Set the input, output. diff --git a/src/core/cpu/kernels/CpuReshapeKernel.h b/src/core/cpu/kernels/CpuReshapeKernel.h index add6782b9e..1425fbe917 100644 --- a/src/core/cpu/kernels/CpuReshapeKernel.h +++ b/src/core/cpu/kernels/CpuReshapeKernel.h @@ -46,10 +46,9 @@ public: */ void configure(const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuReshapeKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data type supported: All - * @param[in] dst Destination tensor info. Data type supported: Same as @p src + * Similar to @ref CpuReshapeKernel::configure() * * @return a status */ diff --git a/src/core/cpu/kernels/CpuScaleKernel.h b/src/core/cpu/kernels/CpuScaleKernel.h index afaf074340..a2b65370ba 100644 --- a/src/core/cpu/kernels/CpuScaleKernel.h +++ b/src/core/cpu/kernels/CpuScaleKernel.h @@ -38,7 +38,6 @@ namespace kernels class CpuScaleKernel : public ICpuKernel { public: - /** Default constructor */ CpuScaleKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuScaleKernel); /** Initialise the kernel's inputs, output and interpolation policy @@ -106,4 +105,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_SCALEKERNEL_H */ +#endif /* ARM_COMPUTE_CPU_SCALEKERNEL_H */ diff --git a/src/core/cpu/kernels/CpuSoftmaxKernel.h b/src/core/cpu/kernels/CpuSoftmaxKernel.h index 2912098c30..776c0d6f79 100644 --- a/src/core/cpu/kernels/CpuSoftmaxKernel.h +++ b/src/core/cpu/kernels/CpuSoftmaxKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_SOFTMAXKERNEL_H -#define ARM_COMPUTE_CPU_SOFTMAXKERNEL_H +#ifndef ARM_COMPUTE_CPU_SOFTMAX_KERNEL_H +#define ARM_COMPUTE_CPU_SOFTMAX_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" @@ -37,7 +37,6 @@ namespace kernels class CpuLogits1DMaxKernel : public ICpuKernel { public: - /** Constructor */ CpuLogits1DMaxKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuLogits1DMaxKernel); /** Set the input and output tensors. 
@@ -71,7 +70,6 @@ template class CpuLogits1DSoftmaxKernel : public ICpuKernel { public: - /** Default constructor */ CpuLogits1DSoftmaxKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuLogits1DSoftmaxKernel); @@ -110,4 +108,4 @@ private: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_SOFTMAXKERNEL_H */ +#endif /* ARM_COMPUTE_CPU_SOFTMAX_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuTransposeKernel.h b/src/core/cpu/kernels/CpuTransposeKernel.h index f09f427be8..920349d5e7 100644 --- a/src/core/cpu/kernels/CpuTransposeKernel.h +++ b/src/core/cpu/kernels/CpuTransposeKernel.h @@ -45,10 +45,9 @@ public: * @param[out] dst Destination tensor. Data types supported: Same as @p src */ void configure(const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuTransposeKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor to permute. Data types supported: All - * @param[in] dst Destination tensor. Data types supported: Same as @p src + * Similar to CpuTransposeKernel::configure() * * @return a status */ diff --git a/src/core/gpu/cl/kernels/ClActivationKernel.cpp b/src/core/gpu/cl/kernels/ClActivationKernel.cpp index e892d6a778..21c05632f9 100644 --- a/src/core/gpu/cl/kernels/ClActivationKernel.cpp +++ b/src/core/gpu/cl/kernels/ClActivationKernel.cpp @@ -87,7 +87,6 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const } // namespace ClActivationKernel::ClActivationKernel() - : _run_in_place(false) { _type = CLKernelType::ELEMENTWISE; } diff --git a/src/core/gpu/cl/kernels/ClActivationKernel.h b/src/core/gpu/cl/kernels/ClActivationKernel.h index 68c309e9e7..720b16a691 100644 --- a/src/core/gpu/cl/kernels/ClActivationKernel.h +++ b/src/core/gpu/cl/kernels/ClActivationKernel.h @@ -51,12 +51,9 @@ public: * @param[in] act_info Activation layer information. */ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo act_info); - /** Static function to check if given info will lead to a valid configuration of @ref ClActivationKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. In case of @p dst tensor info = nullptr, this tensor will store the result - * of the activation function. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32. - * @param[in] dst Destination tensor info. Data type supported: same as @p src - * @param[in] act_info Activation layer information. 
+ * Similar to @ref ClActivationKernel::configure() * * @return a status */ @@ -66,7 +63,7 @@ public: void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override; private: - bool _run_in_place; + bool _run_in_place{ false }; }; } // namespace kernels } // namespace opencl diff --git a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp index dbc628d6d5..fba1b0e087 100644 --- a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp +++ b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp @@ -60,7 +60,6 @@ Status validate_arguments(const ITensorInfo *src, unsigned int batch_offset, con } // namespace ClBatchConcatenateKernel::ClBatchConcatenateKernel() - : _batch_offset(0) { _type = CLKernelType::ELEMENTWISE; } diff --git a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h index d9fa905e8e..2963d7cdfd 100644 --- a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h +++ b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h @@ -40,7 +40,6 @@ namespace kernels class ClBatchConcatenateKernel : public IClKernel { public: - /** Default constructor */ ClBatchConcatenateKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClBatchConcatenateKernel); /** Initialise the kernel's source and destination @@ -55,11 +54,9 @@ public: * */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int batch_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClBatchConcatenateKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Input tensor info. Data types supported: All. - * @param[in] batch_offset The offset on axis # 3. - * @param[in] dst Destination tensor info. Data types supported: Same as @p src. + * Similar to @ref ClBatchConcatenateKernel::configure() * * @return a status */ @@ -69,7 +66,7 @@ public: void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override; private: - unsigned int _batch_offset; + unsigned int _batch_offset{ 0 }; }; } // namespace kernels } // namespace opencl diff --git a/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h b/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h index 6f4f09dc32..3976fd45db 100644 --- a/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h +++ b/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H -#define ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H +#ifndef ARM_COMPUTE_CL_CONVERT_FULLYCONNECTED_WEIGHTS_KERNEL_H +#define ARM_COMPUTE_CL_CONVERT_FULLYCONNECTED_WEIGHTS_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/gpu/cl/ClCompileContext.h" @@ -30,8 +30,6 @@ namespace arm_compute { -class ICLTensor; - /** Interface to convert the 2D Fully Connected weights from NCHW to NHWC or vice versa. * * @note This function can be applied to the 2D weights used by a Fully Connected layer if: @@ -58,12 +56,11 @@ public: * @param[in] data_layout The data layout the weights have been trained in. 
*/ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout); - /** Static function to check if given info will lead to a valid configuration of @ref ClConvertFullyConnectedWeightsKernel + /** Static function to check if given info will lead to a valid configuration + * + * Similar to @ref ClConvertFullyConnectedWeightsKernel::configure() * - * @param[in] src Source weights tensor info to convert. Must be 2 dimensional. Data types supported: All. - * @param[in] dst The converted weights tensor info. Shape and Data Type: Same as @p src. - * @param[in] original_src_shape Shape of the original src tensor (the one entering fully connected layer). - * @param[in] data_layout The data layout the weights have been trained in. + * @return a status */ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout); @@ -73,4 +70,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H */ +#endif /* ARM_COMPUTE_CL_CONVERT_FULLYCONNECTED_WEIGHTS_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClCopyKernel.h b/src/core/gpu/cl/kernels/ClCopyKernel.h index f3eb0aab62..d2732c4e59 100644 --- a/src/core/gpu/cl/kernels/ClCopyKernel.h +++ b/src/core/gpu/cl/kernels/ClCopyKernel.h @@ -48,11 +48,9 @@ public: * @param[in] dst_window (Optional) Window to be used in case only copying into part of a tensor. Default is nullptr. */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Window *dst_window = nullptr); - /** Static function to check if given info will lead to a valid configuration of @ref ClCopyKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: All. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. - * @param[in] dst_window (Optional) Window to be used in case only copying into part of a tensor. Default is nullptr. + * Similar to @ref ClCopyKernel::configure() * * @return a status */ @@ -68,4 +66,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_COPY_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_COPY_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClCropKernel.cpp b/src/core/gpu/cl/kernels/ClCropKernel.cpp index ef2e48b45d..a052ef53f9 100644 --- a/src/core/gpu/cl/kernels/ClCropKernel.cpp +++ b/src/core/gpu/cl/kernels/ClCropKernel.cpp @@ -41,11 +41,6 @@ namespace opencl { namespace kernels { -void ClCropKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *dst_window) -{ - configure(CLKernelLibrary::get().get_compile_context(), src, dst, start, end, batch_index, extrapolation_value, dst_window); -} - ClCropKernel::ClCropKernel() { _type = CLKernelType::ELEMENTWISE; diff --git a/src/core/gpu/cl/kernels/ClCropKernel.h b/src/core/gpu/cl/kernels/ClCropKernel.h index 7120dbbc81..d81912284e 100644 --- a/src/core/gpu/cl/kernels/ClCropKernel.h +++ b/src/core/gpu/cl/kernels/ClCropKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CLCROPKERNEL_H -#define ARM_COMPUTE_CLCROPKERNEL_H +#ifndef ARM_COMPUTE_CL_CROP_KERNEL_H +#define ARM_COMPUTE_CL_CROP_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/gpu/cl/ClCompileContext.h" @@ -40,19 +40,6 @@ class ClCropKernel : public IClKernel public: ClCropKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCropKernel); - /** Configure kernel - * - * @note Supported tensor rank: up to 4 - * - * @param[in] src Source tensor info. Data type supported: All. Data layouts supported: NHWC. - * @param[out] dst Destination tensor info. Data type supported: F32 - * @param[in] start Coordinates of where to start cropping the image. - * @param[in] end Coordinates of where to end cropping the image. - * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p src. - * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0. - * @param[in] dst_window Output window to be used in case cropped image is being copied into a tensor. Default is nullptr. - */ - void configure(const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0, Window *dst_window = nullptr); /** Configure kernel * * @note Supported tensor rank: up to 4 @@ -68,18 +55,11 @@ public: */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0, Window *dst_window = nullptr); - - /** Static function to check if given info will lead to a valid configuration of @ref CLStridedSliceKernel + /** Static function to check if given info will lead to a valid configuration * - * @note Supported tensor rank: up to 4 + * Similar to @ref ClCropKernel::configure() * - * @param[in] src Source tensor info. Data type supported: All. Data layouts supported: NHWC. - * @param[in] dst Destination tensor info. Data type supported: F32 - * @param[in] start Coordinates of where to start cropping the image. - * @param[in] end Coordinates of where to end cropping the image. - * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p src. - * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0. - * @param[in] dst_window Output window to be used in case cropped image is being copied into a tensor. Default is nullptr. 
+ * @return a status */ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0, Window *dst_window = nullptr); @@ -95,4 +75,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CLCROPKERNEL_H */ +#endif /* ARM_COMPUTE_CL_CROP_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h index 103ef00695..0f408477b1 100644 --- a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h +++ b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h @@ -40,7 +40,6 @@ namespace kernels class ClDepthConcatenateKernel : public IClKernel { public: - /** Default constructor */ ClDepthConcatenateKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDepthConcatenateKernel); /** Initialise the kernel's source and destination @@ -55,11 +54,9 @@ public: * */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int depth_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClDepthConcatenateKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 - * @param[in] depth_offset The offset on the Z axis. - * @param[in] dst Destination tensor info. Data types supported: Same as @p src. + * Similar to @ref ClDepthConcatenateKernel::configure() * * @return a status */ diff --git a/src/core/gpu/cl/kernels/ClDequantizeKernel.h b/src/core/gpu/cl/kernels/ClDequantizeKernel.h index 2460674067..0912e1b228 100644 --- a/src/core/gpu/cl/kernels/ClDequantizeKernel.h +++ b/src/core/gpu/cl/kernels/ClDequantizeKernel.h @@ -38,7 +38,6 @@ namespace kernels class ClDequantizeKernel : public IClKernel { public: - /** Default constructor */ ClDequantizeKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizeKernel); /** Initialise the kernel's input and output diff --git a/src/core/gpu/cl/kernels/ClDirectConv2dKernel.h b/src/core/gpu/cl/kernels/ClDirectConv2dKernel.h index b592a2191b..4880d4a668 100644 --- a/src/core/gpu/cl/kernels/ClDirectConv2dKernel.h +++ b/src/core/gpu/cl/kernels/ClDirectConv2dKernel.h @@ -86,4 +86,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_DIRECT_CONV2D_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_DIRECT_CONV2D_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClElementwiseKernel.cpp b/src/core/gpu/cl/kernels/ClElementwiseKernel.cpp index 7bfdb9efdd..b645353dd6 100644 --- a/src/core/gpu/cl/kernels/ClElementwiseKernel.cpp +++ b/src/core/gpu/cl/kernels/ClElementwiseKernel.cpp @@ -276,21 +276,12 @@ ClElementwiseKernel::ClElementwiseKernel() _type = CLKernelType::ELEMENTWISE; } -void ClElementwiseKernel::configure_common(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst) -{ - configure_common(CLKernelLibrary::get().get_compile_context(), src1, src2, dst); -} - void ClElementwiseKernel::configure_common(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst) { // Configure kernel window auto win_config = validate_and_configure_window(*src1, *src2, *dst); ARM_COMPUTE_ERROR_THROW_ON(win_config.first); - _src1 = src1; - _src2 = src2; - _dst = dst; - std::string kernel_name = "elementwise_operation_" + name(); if(is_data_type_quantized(src1->data_type())) { diff --git 
a/src/core/gpu/cl/kernels/ClElementwiseKernel.h b/src/core/gpu/cl/kernels/ClElementwiseKernel.h index 7f55151a87..ab5c777ae6 100644 --- a/src/core/gpu/cl/kernels/ClElementwiseKernel.h +++ b/src/core/gpu/cl/kernels/ClElementwiseKernel.h @@ -44,7 +44,6 @@ namespace kernels class ClElementwiseKernel : public IClKernel { public: - /** Default constructor */ ClElementwiseKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClElementwiseKernel); @@ -77,33 +76,17 @@ protected: */ virtual std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &src1, const ITensorInfo &dst) = 0; - /** Commmon configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff) - * - */ - void configure_common(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst); /** Commmon configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff) * */ void configure_common(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst); ActivationLayerInfo _act_info{}; - -private: - const ITensorInfo *_src1 - { - nullptr - }; /**< Source tensor info 1 */ - const ITensorInfo *_src2 - { - nullptr - }; /**< Source tensor info 2 */ - ITensorInfo *_dst{ nullptr }; /**< Destination tensor info */ }; class ClLogicalBinaryKernel : public ClElementwiseKernel { public: - /** Default constructor */ ClLogicalBinaryKernel() = default; ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogicalBinaryKernel); /** Function to configure kernel @@ -115,12 +98,11 @@ public: * @param[in] dst Destination tensor info. Data types supported: same as @p src1. */ void configure(const ClCompileContext &compile_context, LogicalOperation op, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst); - /** Static function to check if the given configuration is valid for this kernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] op Logical binary operation to be executed. - * @param[in] src1 First source tensor info. Data types supported: U8. - * @param[in] src2 Second source tensor info. Data types supported: same as @p src1. - * @param[in] dst Destination tensor info. Data types supported: same as @p src1. + * Similar to @ref ClLogicalBinaryKernel::configure() + * + * @return a status */ static Status validate(LogicalOperation op, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst); @@ -153,16 +135,11 @@ public: void configure(const ClCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ConvertPolicy &policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref ClSaturatedArithmeticKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] op Arithmetic operation to be executed. - * @param[in] input1 First tensor input info info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] input2 Second tensor input info info. Data types supported: Same as @p input1. - * @param[in] output Output tensor info info. Data types supported: Same as @p input1. - * @param[in] policy Policy to use to handle overflow. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 
+ * Similar to @ref ClSaturatedArithmeticKernel::configure() * - * @return a Status + * @return a status */ static Status validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); @@ -197,15 +174,11 @@ public: void configure(const ClCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref ClArithmeticKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] op Arithmetic operation to be executed. - * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] src2 Second source tensor info. Data types supported: same as @p src1. - * @param[in] dst Destination tensor info. Data types supported: same as @p src1. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * Similar to @ref ClArithmeticKernel::configure() * - * @return a Status + * @return a status */ static Status validate(ArithmeticOperation op, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo()); diff --git a/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h b/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h index 225869b58b..64cc2f7afc 100644 --- a/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h +++ b/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h @@ -48,11 +48,9 @@ public: * @param[in] op Element wise unary operation to perform. */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const ElementWiseUnary &op); - /** Static function to check if given info will lead to a valid configuration of @ref ClElementWiseUnaryKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. - * @param[in] op Element wise unary operation to perform. + * Similar to @ref ClElementWiseUnaryKernel::configure() * * @return a status */ diff --git a/src/core/gpu/cl/kernels/ClFillKernel.cpp b/src/core/gpu/cl/kernels/ClFillKernel.cpp index 526a466a00..f213bf8e6a 100644 --- a/src/core/gpu/cl/kernels/ClFillKernel.cpp +++ b/src/core/gpu/cl/kernels/ClFillKernel.cpp @@ -47,13 +47,6 @@ ClFillKernel::ClFillKernel() _type = CLKernelType::ELEMENTWISE; } -void ClFillKernel::configure(ITensorInfo *tensor, - const PixelValue &constant_value, - Window *window) -{ - configure(CLKernelLibrary::get().get_compile_context(), tensor, constant_value, window); -} - void ClFillKernel::configure(const CLCompileContext &compile_context, ITensorInfo *tensor, const PixelValue &constant_value, Window *window) diff --git a/src/core/gpu/cl/kernels/ClFillKernel.h b/src/core/gpu/cl/kernels/ClFillKernel.h index 9542c20508..ecc2546e4a 100644 --- a/src/core/gpu/cl/kernels/ClFillKernel.h +++ b/src/core/gpu/cl/kernels/ClFillKernel.h @@ -40,13 +40,6 @@ class ClFillKernel : public IClKernel public: ClFillKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClFillKernel); - /** Initialise the kernel's tensor and filling value - * - * @param[in,out] tensor Input tensor info. 
Supported data types: All. - * @param[in] constant_value The value used to fill the planes of the tensor - * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr. - */ - void configure(ITensorInfo *tensor, const PixelValue &constant_value, Window *window = nullptr); /** Initialise the kernel's tensor and filling value * * @param[in] compile_context The compile context to be used. @@ -55,11 +48,9 @@ public: * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr. */ void configure(const CLCompileContext &compile_context, ITensorInfo *tensor, const PixelValue &constant_value, Window *window = nullptr); - /** Static function to check if given info will lead to a valid configuration of @ref ClFillKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] tensor Source tensor info. Data types supported: All. - * @param[in] constant_value The value used to fill the planes of the tensor - * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr. + * Similar to @ref ClFillKernel::configure() * * @return a status */ @@ -74,4 +65,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CLMEMSETRKERNEL_H */ +#endif /* ARM_COMPUTE_CL_FILL_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClFloorKernel.h b/src/core/gpu/cl/kernels/ClFloorKernel.h index 3bc648b7be..57c9906f2c 100644 --- a/src/core/gpu/cl/kernels/ClFloorKernel.h +++ b/src/core/gpu/cl/kernels/ClFloorKernel.h @@ -47,11 +47,9 @@ public: * @param[out] dst Destination tensor info. Same as @p src */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - - /** Static function to check if given info will lead to a valid configuration of @ref ClFloorKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data type supported: F16/F32. - * @param[in] dst Destination tensor info. 
Same as @p src + * Similar to @ref ClFloorKernel::configure() * * @return a status */ diff --git a/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.h b/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.h index 491c3e44df..eaa125fbf2 100644 --- a/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.h +++ b/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.h @@ -78,4 +78,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /* ARM_COMPUTE_CL_GEMMLOWP_MATRIXMULTIPLY_NATIVE_KERNEL_H*/ +#endif /* ARM_COMPUTE_CL_GEMMLOWP_MATRIXMULTIPLY_NATIVE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.h b/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.h index b99dec33af..99cff011d1 100644 --- a/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.h +++ b/src/core/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.h @@ -87,4 +87,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /* ARM_COMPUTE_CL_GEMMLOWP_MATRIXMULTIPLY_RESHAPED_KERNEL_H*/ +#endif /* ARM_COMPUTE_CL_GEMMLOWP_MATRIXMULTIPLY_RESHAPED_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h index c3bdc7589e..cd7bf278c2 100644 --- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h +++ b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h @@ -85,4 +85,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_NATIVE_KERNEL_H*/ +#endif /* ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_NATIVE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h index 9a4380a5b7..0733078fc2 100644 --- a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h +++ b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CL_HEIGHT_CONCATENATE_LAYER_KERNEL_H -#define ARM_COMPUTE_CL_HEIGHT_CONCATENATE_LAYER_KERNEL_H +#ifndef ARM_COMPUTE_CL_HEIGHT_CONCATENATE_KERNEL_H +#define ARM_COMPUTE_CL_HEIGHT_CONCATENATE_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/gpu/cl/ClCompileContext.h" @@ -40,7 +40,6 @@ namespace kernels class ClHeightConcatenateKernel : public IClKernel { public: - /** Default constructor */ ClHeightConcatenateKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClHeightConcatenateKernel); /** Initialise the kernel's source and destination @@ -52,11 +51,9 @@ public: * */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int height_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClHeightConcatenateKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: All. - * @param[in] height_offset The starting offset on the Y axis for the dst tensor. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. 
+ * Similar to @ref ClHeightConcatenateKernel::configure() * * @return a status */ @@ -71,4 +68,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /* ARM_COMPUTE_CL_HEIGHT_CONCATENATE_LAYER_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_HEIGHT_CONCATENATE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClMulKernel.h b/src/core/gpu/cl/kernels/ClMulKernel.h index 9c70301d89..dec8dba61c 100644 --- a/src/core/gpu/cl/kernels/ClMulKernel.h +++ b/src/core/gpu/cl/kernels/ClMulKernel.h @@ -38,7 +38,6 @@ namespace kernels class ClMulKernel : public IClKernel { public: - /** Default constructor */ ClMulKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClMulKernel); /** Initialise the kernel's src and dst. @@ -87,7 +86,6 @@ public: class ClComplexMulKernel : public ICLKernel { public: - /** Default constructor */ ClComplexMulKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClComplexMulKernel); /** Initialise the kernel's src and dst. diff --git a/src/core/gpu/cl/kernels/ClPermuteKernel.h b/src/core/gpu/cl/kernels/ClPermuteKernel.h index 326110a27c..839e224ee4 100644 --- a/src/core/gpu/cl/kernels/ClPermuteKernel.h +++ b/src/core/gpu/cl/kernels/ClPermuteKernel.h @@ -41,7 +41,6 @@ namespace kernels class ClPermuteKernel : public IClKernel { public: - /** Default constructor */ ClPermuteKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClPermuteKernel); /** Set the src and dst of the kernel. @@ -54,13 +53,9 @@ public: * @param[in] perm Permutation vector */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm); - /** Static function to check if given info will lead to a valid configuration of @ref ClPermuteKernel + /** Static function to check if given info will lead to a valid configuration * - * @note Arbitrary permutation vectors are supported with rank not greater than 4 - * - * @param[in] src The src tensor info. Data types supported: All. - * @param[in] dst The dst tensor info. Data types supported: same as @p src. 
- * @param[in] perm Permutation vector + * Similar to @ref ClPermuteKernel::configure() * * @return a status */ @@ -75,4 +70,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_PERMUTE_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_PERMUTE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClPool2dKernel.cpp b/src/core/gpu/cl/kernels/ClPool2dKernel.cpp index 9d5a24fdf2..e522814b6d 100644 --- a/src/core/gpu/cl/kernels/ClPool2dKernel.cpp +++ b/src/core/gpu/cl/kernels/ClPool2dKernel.cpp @@ -203,7 +203,6 @@ std::tuple validate_and_configure_window(ITenso } // namespace ClPool2dKernel::ClPool2dKernel() - : _pool_info(), _data_layout(DataLayout::UNKNOWN), _border_size(0), _num_elems_processed_per_iteration(1) { _type = CLKernelType::POOL; } diff --git a/src/core/gpu/cl/kernels/ClPool2dKernel.h b/src/core/gpu/cl/kernels/ClPool2dKernel.h index 8ecb8eb7b7..ab8c56a857 100644 --- a/src/core/gpu/cl/kernels/ClPool2dKernel.h +++ b/src/core/gpu/cl/kernels/ClPool2dKernel.h @@ -38,7 +38,6 @@ namespace kernels class ClPool2dKernel : public IClKernel { public: - /** Default constructor */ ClPool2dKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClPool2dKernel); @@ -65,10 +64,10 @@ public: BorderSize border_size() const override; public: - PoolingLayerInfo _pool_info; - DataLayout _data_layout; - BorderSize _border_size; - unsigned int _num_elems_processed_per_iteration; + PoolingLayerInfo _pool_info{}; + DataLayout _data_layout{ DataLayout::UNKNOWN }; + BorderSize _border_size{ 0 }; + unsigned int _num_elems_processed_per_iteration{ 1 }; }; } // namespace kernels } // namespace opencl diff --git a/src/core/gpu/cl/kernels/ClQuantizeKernel.h b/src/core/gpu/cl/kernels/ClQuantizeKernel.h index cd972987f5..1991a2fba8 100644 --- a/src/core/gpu/cl/kernels/ClQuantizeKernel.h +++ b/src/core/gpu/cl/kernels/ClQuantizeKernel.h @@ -41,7 +41,6 @@ namespace kernels class ClQuantizeKernel : public IClKernel { public: - /** Default constructor */ ClQuantizeKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizeKernel); /** Set the input, output. diff --git a/src/core/gpu/cl/kernels/ClReshapeKernel.h b/src/core/gpu/cl/kernels/ClReshapeKernel.h index 3cd8369012..01e1ee84b9 100644 --- a/src/core/gpu/cl/kernels/ClReshapeKernel.h +++ b/src/core/gpu/cl/kernels/ClReshapeKernel.h @@ -47,11 +47,9 @@ public: * @param[out] dst Destination tensor info. Data type supported: Same as @p src */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - - /** Static function to check if given info will lead to a valid configuration of @ref ClReshapeKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data type supported: All - * @param[in] dst Destination tensor info. 
Data type supported: Same as @p src + * Similar to @ref ClReshapeKernel::configure() * * @return a status */ @@ -63,4 +61,4 @@ public: } // namespace opencl } // namespace kernels } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_RESHAPE_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_RESHAPE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClScaleKernel.h b/src/core/gpu/cl/kernels/ClScaleKernel.h index 826c4821b5..8333c7d6c0 100644 --- a/src/core/gpu/cl/kernels/ClScaleKernel.h +++ b/src/core/gpu/cl/kernels/ClScaleKernel.h @@ -31,8 +31,6 @@ namespace arm_compute { -class ICLTensor; - namespace opencl { namespace kernels @@ -41,10 +39,8 @@ namespace kernels class ClScaleKernel : public IClKernel { public: - /** Default constructor */ ClScaleKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClScaleKernel); - /** Initialise the kernel's inputs, output and interpolation policy * * @param[in] compile_context The compile context to be used. @@ -54,13 +50,9 @@ public: * @param[in] info @ref ScaleKernelInfo Kernel descriptor to be used to configure. */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info); - - /** Static function to check if given info will lead to a valid configuration of @ref ClScaleKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32 - * @param[in] dst Destination tensor info. Data types supported: Same as @p src - * All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane. - * @param[in] info @ref ScaleKernelInfo Kernel descriptor to be used to validate + * Similar to @ref ClScaleKernel::configure() * * @return a status */ @@ -71,9 +63,9 @@ public: void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; private: - DataLayout _data_layout = DataLayout::UNKNOWN; + DataLayout _data_layout{ DataLayout::UNKNOWN }; }; } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CLSCALEKERNEL_H */ +#endif /* ARM_COMPUTE_CL_SCALE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClSoftmaxKernel.h b/src/core/gpu/cl/kernels/ClSoftmaxKernel.h index db1aca3b9b..a2ad02d6b7 100644 --- a/src/core/gpu/cl/kernels/ClSoftmaxKernel.h +++ b/src/core/gpu/cl/kernels/ClSoftmaxKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H -#define ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H +#ifndef ARM_COMPUTE_CL_SOFTMAX_KERNEL_H +#define ARM_COMPUTE_CL_SOFTMAX_KERNEL_H #include "arm_compute/core/Error.h" #include "arm_compute/core/KernelDescriptors.h" @@ -50,7 +50,6 @@ public: /** Info for whether a parallel reduction will be run and the vector size of the execution. */ using ParallelReductionInfo = std::tuple; - /** Default constructor */ ClLogits1DMaxShiftExpSumKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DMaxShiftExpSumKernel); /** Configure the kernel using the given information about tensors @@ -63,12 +62,9 @@ public: * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo. 
*/ void configure(const CLCompileContext &compile_context, const ITensorInfo &src, ITensorInfo &max, ITensorInfo &dst, ITensorInfo &sum, const SoftmaxKernelInfo &info); - /** Static function to check if given info will lead to a valid configuration of @ref ClLogits1DMaxShiftExpSumKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 - * @param[in] max Max values tensor. Data types supported: same as @p src - * @param[in] dst Destination tensor. Data types supported: same as @p src - * @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p src + * Similar to @ref ClLogits1DMaxShiftExpSumKernel::configure() * * @return a status */ @@ -93,7 +89,6 @@ public: class ClLogits1DNormKernel : public IClKernel { public: - /** Default constructor */ ClLogits1DNormKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DNormKernel); @@ -106,12 +101,9 @@ public: * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo. */ void configure(const CLCompileContext &compile_context, const ITensorInfo &src, const ITensorInfo &sum, ITensorInfo &dst, const SoftmaxKernelInfo &info); - /** Static function to check if given info will lead to a valid configuration of @ref ClLogits1DNormKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported. - * @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input - * @param[in] dst Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input - * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo. + * Similar to @ref ClLogits1DNormKernel::configure() * * @return a status */ @@ -123,4 +115,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H */ +#endif /* ARM_COMPUTE_CL_SOFTMAX_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClTransposeKernel.h b/src/core/gpu/cl/kernels/ClTransposeKernel.h index 7d1226cc0d..c8379d44c7 100644 --- a/src/core/gpu/cl/kernels/ClTransposeKernel.h +++ b/src/core/gpu/cl/kernels/ClTransposeKernel.h @@ -47,10 +47,9 @@ public: * @param[in] dst The dst tensor info. Data types supported: Same as @p src */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClTransposeKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src The src tensor info. Data types supported: All. - * @param[in] dst The dst tensor info. Data types supported: same as @p src. 
+ * Similar to @ref ClTransposeKernel::configure() * * @return a status */ @@ -62,4 +61,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_TRANSPOSE_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_TRANSPOSE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h index 56202bad2e..15e0757aec 100644 --- a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h +++ b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h @@ -40,7 +40,6 @@ namespace kernels class ClWidthConcatenate2TensorsKernel : public IClKernel { public: - /** Default constructor */ ClWidthConcatenate2TensorsKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWidthConcatenate2TensorsKernel); /** Initialise the kernel's sources and destination @@ -51,11 +50,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: Same as @p src1. */ void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate2TensorsKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src1 First tensor info. Data types supported: All. - * @param[in] src2 Second tensor info. Data types supported: same as @p src1 - * @param[in] dst Destination tensor info. Data types supported: Same as @p src1. + * Similar to @ref ClWidthConcatenate2TensorsKernel::configure() * * @return a status */ diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h index 19bda65902..1e3f47f7fb 100644 --- a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h +++ b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h @@ -41,7 +41,6 @@ namespace kernels class ClWidthConcatenate4TensorsKernel : public IClKernel { public: - /** Default constructor */ ClWidthConcatenate4TensorsKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWidthConcatenate4TensorsKernel); /** Initialise the kernel's sources and destination @@ -54,13 +53,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src1. */ void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *src3, ITensorInfo *src4, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate4TensorsKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src1 First tensor info. Data types supported: All. - * @param[in] src2 Second tensor info. Data types supported: same as @p src1 - * @param[in] src3 Third tensor info. Data types supported: same as @p src1 - * @param[in] src4 Fourth tensor info. Data types supported: same as @p src1 - * @param[in] dst Destination tensor info. Data types supported: same as @p src1. + * Similar to @ref ClWidthConcatenate4TensorsKernel::configure() * * @return a status */ diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h index 6bc8e57a08..300c4beb30 100644 --- a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h +++ b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CL_WIDTH_CONCATENATE_LAYER_KERNEL_H -#define ARM_COMPUTE_CL_WIDTH_CONCATENATE_LAYER_KERNEL_H +#ifndef ARM_COMPUTE_CL_WIDTH_CONCATENATE_KERNEL_H +#define ARM_COMPUTE_CL_WIDTH_CONCATENATE_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/gpu/cl/ClCompileContext.h" @@ -40,7 +40,6 @@ namespace kernels class ClWidthConcatenateKernel : public IClKernel { public: - /** Default constructor */ ClWidthConcatenateKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWidthConcatenateKernel); /** Initialise the kernel's source and destination @@ -52,11 +51,9 @@ public: * */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenateKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: All. - * @param[in] width_offset The offset on the X axis. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to @ref ClWidthConcatenateKernel::configure() * * @return a status */ @@ -68,4 +65,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /* ARM_COMPUTE_CL_WIDTH_CONCATENATE_LAYER_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_WIDTH_CONCATENATE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h index 13200dc419..145954fbb1 100644 --- a/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h +++ b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h @@ -39,7 +39,6 @@ namespace kernels class ClWinogradFilterTransformKernel : public IClKernel { public: - /** Default constructor */ ClWinogradFilterTransformKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradFilterTransformKernel); /** Set the input and output tensor. @@ -75,4 +74,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp index 62db2282e0..538d8ae602 100644 --- a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp +++ b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp @@ -101,7 +101,6 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen } // namespace ClWinogradInputTransformKernel::ClWinogradInputTransformKernel() - : _border_size(0), _data_layout(DataLayout::UNKNOWN), _num_tiles_x(0), _num_tiles_y(0), _step_z(1) { _type = CLKernelType::WINOGRAD; } diff --git a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h index 76b45279a4..40fc2f387a 100644 --- a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h +++ b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h @@ -39,7 +39,6 @@ namespace kernels class ClWinogradInputTransformKernel : public IClKernel { public: - /** Default constructor */ ClWinogradInputTransformKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradInputTransformKernel); /** Set the input and output of the kernel. 
@@ -76,13 +75,13 @@ public: private: using WinogradKey = std::pair, std::pair>; - BorderSize _border_size; - DataLayout _data_layout; - int _num_tiles_x; - int _num_tiles_y; - unsigned int _step_z; + BorderSize _border_size{ 0 }; + DataLayout _data_layout{ DataLayout::UNKNOWN }; + int _num_tiles_x{ 0 }; + int _num_tiles_y{ 0 }; + unsigned int _step_z{ 1 }; }; } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h index 2948d3f181..22b7f079c1 100644 --- a/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h +++ b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h @@ -39,7 +39,6 @@ namespace kernels class ClWinogradOutputTransformKernel : public IClKernel { public: - /** Default constructor */ ClWinogradOutputTransformKernel(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradOutputTransformKernel); /** Set the input and output tensor. @@ -64,7 +63,6 @@ public: */ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration * * Similar to ClWinogradOutputTransformKernel::configure() @@ -84,4 +82,4 @@ private: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H */ diff --git a/src/runtime/cpu/operators/CpuActivation.h b/src/runtime/cpu/operators/CpuActivation.h index 0ae16bf958..ded4a37edb 100644 --- a/src/runtime/cpu/operators/CpuActivation.h +++ b/src/runtime/cpu/operators/CpuActivation.h @@ -34,8 +34,6 @@ namespace cpu class CpuActivation : public ICpuOperator { public: - /** Constructor */ - CpuActivation() = default; /** Configure operator for a given list of arguments * * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32. @@ -43,11 +41,9 @@ public: * @param[in] activation_info Activation layer parameters. */ void configure(const ITensorInfo *input, ITensorInfo *output, const ActivationLayerInfo &activation_info); - /** Static function to check if given info will lead to a valid configuration of @ref CpuActivation + /** Static function to check if given info will lead to a valid configuration * - * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32. - * @param[in] output Destination tensor info. Data type supported: same as @p src - * @param[in] act_info Activation layer information. + * Similar to @ref CpuActivation::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuAdd.h b/src/runtime/cpu/operators/CpuAdd.h index 8ae7833f01..febb79e4cd 100644 --- a/src/runtime/cpu/operators/CpuAdd.h +++ b/src/runtime/cpu/operators/CpuAdd.h @@ -34,8 +34,6 @@ namespace cpu class CpuAdd : public ICpuOperator { public: - /** Constructor */ - CpuAdd() = default; /** Initialise the kernel's input, dst and border mode. 
* * Valid configurations (src0,src1) -> dst : @@ -60,13 +58,9 @@ public: * */ void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref CpuAdd + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src0 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 - * @param[in] src1 Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32 - * @param[in] dst The dst tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] policy Overflow policy. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported. + * Similar to @ref CpuAdd::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuCast.h b/src/runtime/cpu/operators/CpuCast.h index 2aea2d2b09..26f5740b86 100644 --- a/src/runtime/cpu/operators/CpuCast.h +++ b/src/runtime/cpu/operators/CpuCast.h @@ -34,8 +34,6 @@ namespace cpu class CpuCast : public ICpuOperator { public: - /** Constructor */ - CpuCast() = default; /** Configure operator for a given list of arguments * * Input data type must be different than output data type. diff --git a/src/runtime/cpu/operators/CpuConcatenate.cpp b/src/runtime/cpu/operators/CpuConcatenate.cpp index 23eb3fceab..bb475b790e 100644 --- a/src/runtime/cpu/operators/CpuConcatenate.cpp +++ b/src/runtime/cpu/operators/CpuConcatenate.cpp @@ -42,11 +42,6 @@ namespace arm_compute { namespace cpu { -CpuConcatenate::CpuConcatenate() - : _concat_kernels(), _num_srcs(0), _axis(0) -{ -} - void CpuConcatenate::configure(const std::vector &srcs_vector, ITensorInfo *dst, size_t axis) { ARM_COMPUTE_ERROR_ON(dst == nullptr); diff --git a/src/runtime/cpu/operators/CpuConcatenate.h b/src/runtime/cpu/operators/CpuConcatenate.h index d2af3e2ad2..55eab54996 100644 --- a/src/runtime/cpu/operators/CpuConcatenate.h +++ b/src/runtime/cpu/operators/CpuConcatenate.h @@ -43,8 +43,7 @@ namespace cpu class CpuConcatenate : public ICpuOperator { public: - /** Constructor */ - CpuConcatenate(); + CpuConcatenate() = default; /** Configure operator for a given list of arguments * * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis. @@ -56,15 +55,9 @@ public: * @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3. */ void configure(const std::vector &srcs_vector, ITensorInfo *dst, size_t axis); - /** Static function to check if given info will lead to a valid configuration of @ref NEConcatenateLayer + /** Static function to check if given info will lead to a valid configuration * - * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis. - * @note Preconditions can be found respectively at @ref kernels::CpuConcatenateWidthKernel, @ref kernels::CpuConcatenateHeightKernel, - * @ref kernels::CpuConcatenateDepthKernel and @ref kernels::CpuConcatenateBatchKernel. - * - * @param[in] srcs_vector The vectors containing all the tensors info to concatenate. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[in] dst Output tensor info. Data types supported: Same as @p srcs_vector. - * @param[in] axis Concatenation axis. 
Supported underlying concatenation axis are 0, 1, 2 and 3. + * Similar to @ref CpuConcatenate::configure() * * @return a status */ @@ -74,9 +67,9 @@ public: void run(ITensorPack &tensors) override; private: - std::vector> _concat_kernels; - unsigned int _num_srcs; - unsigned int _axis; + std::vector> _concat_kernels{}; + unsigned int _num_srcs{ 0 }; + unsigned int _axis{ 0 }; }; } // namespace cpu } // namespace arm_compute diff --git a/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h b/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h index 3f1ddf1dbe..53ee17f6d1 100644 --- a/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h +++ b/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_H -#define ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_H +#ifndef ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_H +#define ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_H #include "src/runtime/cpu/ICpuOperator.h" @@ -34,8 +34,6 @@ namespace cpu class CpuConvertFullyConnectedWeights : public ICpuOperator { public: - /** Constructor */ - CpuConvertFullyConnectedWeights() = default; /** Configure operator for a given list of arguments * * @param[in] src Source tensor to permute. Data types supported: All @@ -44,12 +42,9 @@ public: * @param[in] data_layout The data layout the weights have been trained in. */ void configure(const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout); - /** Static function to check if given info will lead to a valid configuration of @ref CpuConvertFullyConnectedWeights + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor to permute. Data types supported: All - * @param[in] dst Destination tensor. Data types supported: Same as @p dst - * @param[in] original_src_shape Shape of the original src tensor (the one entering fully connected layer). - * @param[in] data_layout The data layout the weights have been trained in. + * Similar to @ref CpuConvertFullyConnectedWeights::configure() * * @return a status */ @@ -59,4 +54,4 @@ public: }; } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_H */ +#endif /* ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_H */ diff --git a/src/runtime/cpu/operators/CpuCopy.h b/src/runtime/cpu/operators/CpuCopy.h index 057bb6efa5..861bbb7849 100644 --- a/src/runtime/cpu/operators/CpuCopy.h +++ b/src/runtime/cpu/operators/CpuCopy.h @@ -34,19 +34,15 @@ namespace cpu class CpuCopy : public ICpuOperator { public: - /** Constructor */ - CpuCopy() = default; /** Configure operator for a given list of arguments * * @param[in] src Source tensor info. Data type supported: All * @param[out] dst Destination info. Data type supported: Same as @p src */ void configure(const ITensorInfo *src, ITensorInfo *dst); - - /** Static function to check if given info will lead to a valid configuration of @ref CpuCopy + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data type supported: All - * @param[in] dst Destination tensor info. 
Data type supported: Same as @p src + * Similar to @ref CpuCopy::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp b/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp index f577e94def..8141487125 100644 --- a/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp +++ b/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp @@ -71,12 +71,6 @@ Status validate_arguments_optimized(const ITensorInfo *src, const ITensorInfo *w } } // namespace -CpuDepthwiseConv2d::CpuDepthwiseConv2dOptimizedInternal::CpuDepthwiseConv2dOptimizedInternal() - : _dwc_optimized_func(nullptr), _permute_input(nullptr), _permute_weights(nullptr), _permute_output(nullptr), _activationlayer_function(nullptr), _has_bias(false), _is_quantized(false), - _is_nchw(true), _permute(false), _is_activationlayer_enabled(false), _is_prepared(false) -{ -} - void CpuDepthwiseConv2d::CpuDepthwiseConv2dOptimizedInternal::configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, @@ -264,12 +258,6 @@ void CpuDepthwiseConv2d::CpuDepthwiseConv2dOptimizedInternal::prepare(ITensorPac } } -CpuDepthwiseConv2d::CpuDepthwiseConv2dGeneric::CpuDepthwiseConv2dGeneric() - : _depthwise_conv_kernel(nullptr), _permute_input(nullptr), _permute_weights(nullptr), _permute_output(nullptr), _activationlayer_function(nullptr), _is_nchw(true), _is_prepared(false), - _is_activationlayer_enabled(false) -{ -} - void CpuDepthwiseConv2d::CpuDepthwiseConv2dGeneric::configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst); @@ -432,11 +420,6 @@ void CpuDepthwiseConv2d::CpuDepthwiseConv2dGeneric::prepare(ITensorPack &tensors } } -CpuDepthwiseConv2d::CpuDepthwiseConv2d() - : _depth_conv_func(DepthwiseConvolutionFunction::GENERIC), _func_optimized(), _func_generic() -{ -} - void CpuDepthwiseConv2d::configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info) { _depth_conv_func = get_depthwiseconvolution_function(src, weights, (biases != nullptr) ? biases : nullptr, dst, info); diff --git a/src/runtime/cpu/operators/CpuDepthwiseConv2d.h b/src/runtime/cpu/operators/CpuDepthwiseConv2d.h index ae9f894aab..dd4839b28a 100644 --- a/src/runtime/cpu/operators/CpuDepthwiseConv2d.h +++ b/src/runtime/cpu/operators/CpuDepthwiseConv2d.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_DEPTHWISECONV2D_H -#define ARM_COMPUTE_CPU_DEPTHWISECONV2D_H +#ifndef ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_H +#define ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_H #include "arm_compute/core/ITensorInfo.h" #include "arm_compute/core/experimental/Types.h" @@ -45,7 +45,7 @@ class CpuDepthwiseConv2d : public ICpuOperator { public: /** Default constructor */ - CpuDepthwiseConv2d(); + CpuDepthwiseConv2d() = default; /** Initialize the function's source, destination, weights and convolution information. * * @param[in, out] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32 @@ -57,7 +57,6 @@ public: * @param[in] info Depthwise convolution meta-data. 
*/ void configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info); - /** Static function to check if given info will lead to a valid configuration * * Similar to CpuDepthwiseConv2d::configure() @@ -65,7 +64,6 @@ public: * @return a status */ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ConvolutionInfo &info); - /** Static function to choose the best depthwise convolution function for @ref CpuDepthwiseConv2d * * @param[in] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32 @@ -100,7 +98,7 @@ private: { public: /** Default constructor */ - CpuDepthwiseConv2dOptimizedInternal(); + CpuDepthwiseConv2dOptimizedInternal() = default; /** Prevent instances of this class from being copied (As this class contains pointers) */ CpuDepthwiseConv2dOptimizedInternal(const CpuDepthwiseConv2dOptimizedInternal &) = delete; /** Default move constructor */ @@ -121,7 +119,6 @@ private: * @param[in] info Depthwise convolution meta-data. */ void configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info); - /** Static function to check if given info will lead to a valid configuration * * Similar to CpuDepthwiseConv2dOptimizedInternal::configure() @@ -157,7 +154,7 @@ private: { public: /** Default constructor */ - CpuDepthwiseConv2dGeneric(); + CpuDepthwiseConv2dGeneric() = default; /** Prevent instances of this class from being copied (As this class contains pointers) */ CpuDepthwiseConv2dGeneric(const CpuDepthwiseConv2dGeneric &) = delete; /** Default move constructor */ @@ -203,10 +200,10 @@ private: bool _is_activationlayer_enabled{ false }; }; - DepthwiseConvolutionFunction _depth_conv_func; - CpuDepthwiseConv2dOptimizedInternal _func_optimized; - CpuDepthwiseConv2dGeneric _func_generic; + DepthwiseConvolutionFunction _depth_conv_func{ DepthwiseConvolutionFunction::GENERIC }; + CpuDepthwiseConv2dOptimizedInternal _func_optimized{}; + CpuDepthwiseConv2dGeneric _func_generic{}; }; } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_DEPTHWISECONV2D_H */ +#endif /* ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_H */ diff --git a/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h b/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h index 70845163f4..f3d3b618c6 100644 --- a/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h +++ b/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h @@ -35,10 +35,8 @@ namespace cpu class CpuDepthwiseConv2dAssemblyDispatch : public ICpuOperator { public: - /** Default constructor */ CpuDepthwiseConv2dAssemblyDispatch(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDepthwiseConv2dAssemblyDispatch); - /** Default destructor */ ~CpuDepthwiseConv2dAssemblyDispatch(); /** Initialize the function's source, destination, kernels and border_size. 
* @@ -67,6 +65,7 @@ public: * @return True if activation is supported else false */ static bool is_activation_supported(const ActivationLayerInfo &activation); + // Inherited methods overridden: void run(ITensorPack &tensors) override; void prepare(ITensorPack &tensors) override; diff --git a/src/runtime/cpu/operators/CpuDequantize.h b/src/runtime/cpu/operators/CpuDequantize.h index d1fb9e8d0e..fdbd6a57c2 100644 --- a/src/runtime/cpu/operators/CpuDequantize.h +++ b/src/runtime/cpu/operators/CpuDequantize.h @@ -34,8 +34,6 @@ namespace cpu class CpuDequantize : public ICpuOperator { public: - /** Default Constructor */ - CpuDequantize() = default; /** Configure the kernel. * * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16. diff --git a/src/runtime/cpu/operators/CpuDirectConv2d.h b/src/runtime/cpu/operators/CpuDirectConv2d.h index 9e584b9c49..c17b076f85 100644 --- a/src/runtime/cpu/operators/CpuDirectConv2d.h +++ b/src/runtime/cpu/operators/CpuDirectConv2d.h @@ -55,9 +55,7 @@ namespace cpu class CpuDirectConv2d : public ICpuOperator { public: - /** Constructor */ CpuDirectConv2d(std::shared_ptr memory_manager = nullptr); - /** Destructor */ ~CpuDirectConv2d(); /** Set the input, weights, biases and output tensors. * diff --git a/src/runtime/cpu/operators/CpuElementwise.h b/src/runtime/cpu/operators/CpuElementwise.h index 899a2ffdb7..ef5caf2825 100644 --- a/src/runtime/cpu/operators/CpuElementwise.h +++ b/src/runtime/cpu/operators/CpuElementwise.h @@ -52,13 +52,11 @@ public: * @param[out] dst The output tensor information. */ void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst); - /** Static function to check if the given information will lead to a valid configuration + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src0 The first source tensor information. - * @param[in] src1 The second source tensor information. With PRelu, this is used as alpha tensor. - * @param[out] dst The output tensor information. + * Similar to @ref CpuElementwiseArithmetic::configure() * - * @return A status + * @return a status */ static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst); }; @@ -85,11 +83,9 @@ public: * @param[out] dst Output tensor info. Data types supported: Same as @p src0. */ void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuArithmeticKernel for division + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src0 First tensor input info. Data types supported: S32/F16/F32. - * @param[in] src1 Second tensor input info. Data types supported: Same as @p src0. - * @param[in] dst Output tensor info. Data types supported: Same as @p src0. + * Similar to @ref CpuElementwiseDivision::configure() * * @return a status */ @@ -112,11 +108,9 @@ public: * @param[out] dst Output tensor info. Data types supported: Same as @p src0. */ void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuArithmeticKernel for power + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src0 First tensor input info. Data types supported: F16/F32. - * @param[in] src1 Second tensor input info. 
Data types supported: Same as @p src0. - * @param[in] dst Output tensor info. Data types supported: Same as @p src0. + * Similar to @ref CpuElementwisePower::configure() * * @return a status */ @@ -139,12 +133,9 @@ public: * @param[in] op Comparison Operation to be performed. */ void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ComparisonOperation op); - /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuComparisonKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src0 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32. - * @param[in] src1 Second tensor input info. Data types supported: Same as @p src0. - * @param[in] dst Output tensor info. Data types supported: U16/U32. - * @param[in] op Comparison Operation to be performed. + * Similar to @ref CpuElementwiseComparison::configure() * * @return a status */ @@ -167,11 +158,9 @@ public: * @param[out] dst Output tensor info. Data types supported: U16/U32. */ void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuComparisonKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src0 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32. - * @param[in] src1 Second tensor input info. Data types supported: Same as @p src0. - * @param[in] dst Output tensor info. Data types supported: U16/U32. + * Similar to @ref CpuElementwiseComparisonStatic::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuElementwiseUnary.h b/src/runtime/cpu/operators/CpuElementwiseUnary.h index 721ba2a85b..5ea29e07e9 100644 --- a/src/runtime/cpu/operators/CpuElementwiseUnary.h +++ b/src/runtime/cpu/operators/CpuElementwiseUnary.h @@ -43,9 +43,7 @@ public: void configure(ElementWiseUnary op, const ITensorInfo &src, ITensorInfo &dst); /** Static function to check if given info will lead to a valid configuration * - * @param[in] op Unary operation to execute - * @param[in] src First tensor input info. Data types supported: F16/F32, F16/F32/S32 for NEG/ABS operations. - * @param[in] dst Output tensor info. Data types supported: Same as @p input. + * Similar to @ref CpuElementwiseUnary::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuFill.h b/src/runtime/cpu/operators/CpuFill.h index fac8e76481..b946467da6 100644 --- a/src/runtime/cpu/operators/CpuFill.h +++ b/src/runtime/cpu/operators/CpuFill.h @@ -34,8 +34,6 @@ namespace cpu class CpuFill : public ICpuOperator { public: - /** Constructor */ - CpuFill() = default; /** Configure operator for a given list of arguments * * @param[in,out] tensor Tensor to fill. 
Supported data types: All diff --git a/src/runtime/cpu/operators/CpuFlatten.h b/src/runtime/cpu/operators/CpuFlatten.h index ae71453988..3e24a93429 100644 --- a/src/runtime/cpu/operators/CpuFlatten.h +++ b/src/runtime/cpu/operators/CpuFlatten.h @@ -34,8 +34,6 @@ namespace cpu class CpuFlatten : public ICpuOperator { public: - /** Constructor */ - CpuFlatten() = default; /** Configure operator for a given list of arguments * * Valid data layouts: diff --git a/src/runtime/cpu/operators/CpuFloor.h b/src/runtime/cpu/operators/CpuFloor.h index cbb9d565eb..0cd0cc0b4e 100644 --- a/src/runtime/cpu/operators/CpuFloor.h +++ b/src/runtime/cpu/operators/CpuFloor.h @@ -34,18 +34,15 @@ namespace cpu class CpuFloor : public ICpuOperator { public: - /** Constructor */ - CpuFloor() = default; /** Configure operator for a given list of arguments * * @param[in] src Source tensor info. Data types supported: F16/F32. * @param[in] dst Destination tensor info. Data type supported: same as @p src */ void configure(const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuFloor + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data type supported: same as @p src + * Similar to @ref CpuFloor::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuGemmDirectConv2d.h b/src/runtime/cpu/operators/CpuGemmDirectConv2d.h index b572f36a3a..7fb20b3037 100644 --- a/src/runtime/cpu/operators/CpuGemmDirectConv2d.h +++ b/src/runtime/cpu/operators/CpuGemmDirectConv2d.h @@ -41,10 +41,8 @@ namespace cpu class CpuGemmDirectConv2d : public ICpuOperator { public: - /** Constructor */ CpuGemmDirectConv2d(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmDirectConv2d); - /** Destructor */ ~CpuGemmDirectConv2d(); /** Set the input and output tensors. * diff --git a/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h b/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h index 848aaea3e7..bed88a60d5 100644 --- a/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h +++ b/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OUTPUTSTAGE_H -#define ARM_COMPUTE_CPU_GEMMLOWP_OUTPUTSTAGE_H +#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OUTPUT_STAGE_H +#define ARM_COMPUTE_CPU_GEMMLOWP_OUTPUT_STAGE_H #include "arm_compute/core/Types.h" #include "src/runtime/cpu/ICpuOperator.h" @@ -51,10 +51,6 @@ namespace cpu class CpuGemmLowpOutputStage : public ICpuOperator { public: - /** Constructor */ - CpuGemmLowpOutputStage() = default; - /** Default destructor */ - ~CpuGemmLowpOutputStage() = default; /** Initialise the kernel's inputs, output * * Valid data layouts: @@ -87,4 +83,4 @@ public: }; } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OUTPUTSTAGE_H */ +#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OUTPUT_STAGE_H */ diff --git a/src/runtime/cpu/operators/CpuMul.h b/src/runtime/cpu/operators/CpuMul.h index 6e717188a4..da518c4461 100644 --- a/src/runtime/cpu/operators/CpuMul.h +++ b/src/runtime/cpu/operators/CpuMul.h @@ -35,8 +35,6 @@ namespace cpu class CpuMul : public ICpuOperator { public: - /** Default Constructor */ - CpuMul() = default; /** Initialise the kernel's inputs, dst and convertion policy. 
* * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported. @@ -81,8 +79,6 @@ public: class CpuComplexMul : public ICpuOperator { public: - /** Default Constructor */ - CpuComplexMul() = default; /** Initialise the kernel's inputs, dst. * * @param[in, out] src1 First input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor). diff --git a/src/runtime/cpu/operators/CpuPermute.h b/src/runtime/cpu/operators/CpuPermute.h index 2b30d7fbd8..2500017c0e 100644 --- a/src/runtime/cpu/operators/CpuPermute.h +++ b/src/runtime/cpu/operators/CpuPermute.h @@ -34,8 +34,6 @@ namespace cpu class CpuPermute : public ICpuOperator { public: - /** Constructor */ - CpuPermute() = default; /** Configure operator for a given list of arguments * * @note Arbitrary permutation vectors are supported with rank not greater than 4 @@ -45,13 +43,9 @@ public: * @param[in] perm Permutation vector */ void configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm); - /** Static function to check if given info will lead to a valid configuration of @ref CpuPermute + /** Static function to check if given info will lead to a valid configuration * - * @note Arbitrary permutation vectors are supported with rank not greater than 4 - * - * @param[in] src Source tensor to permute. Data types supported: All - * @param[in] dst Destination tensor. Data types supported: Same as @p dst - * @param[in] perm Permutation vector + * Similar to @ref CpuPermute::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuPool2d.h b/src/runtime/cpu/operators/CpuPool2d.h index 68416b5cfc..7feff91612 100644 --- a/src/runtime/cpu/operators/CpuPool2d.h +++ b/src/runtime/cpu/operators/CpuPool2d.h @@ -46,10 +46,8 @@ namespace cpu class CpuPool2d : public ICpuOperator { public: - /** Constructor */ CpuPool2d(); ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuPool2d); - /** Default destructor */ ~CpuPool2d(); /** Set the src and dst tensors. * diff --git a/src/runtime/cpu/operators/CpuQuantize.h b/src/runtime/cpu/operators/CpuQuantize.h index 09afffd920..9a34a36bcc 100644 --- a/src/runtime/cpu/operators/CpuQuantize.h +++ b/src/runtime/cpu/operators/CpuQuantize.h @@ -34,8 +34,6 @@ namespace cpu class CpuQuantize : public ICpuOperator { public: - /** Default Constructor */ - CpuQuantize() = default; /** Set the input and output tensors. * * @param[in] src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. diff --git a/src/runtime/cpu/operators/CpuReshape.h b/src/runtime/cpu/operators/CpuReshape.h index e136043568..581b55e0ef 100644 --- a/src/runtime/cpu/operators/CpuReshape.h +++ b/src/runtime/cpu/operators/CpuReshape.h @@ -34,19 +34,15 @@ namespace cpu class CpuReshape : public ICpuOperator { public: - /** Constructor */ - CpuReshape() = default; /** Configure operator for a given list of arguments * * @param[in] src Source tensor info. Data type supported: All * @param[out] dst Destination info. Data type supported: Same as @p src */ void configure(const ITensorInfo *src, ITensorInfo *dst); - - /** Static function to check if given info will lead to a valid configuration of @ref CpuReshape + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data type supported: All - * @param[in] dst Destination tensor info. 
Data type supported: Same as @p src + * Similar to @ref CpuReshape::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuScale.cpp b/src/runtime/cpu/operators/CpuScale.cpp index 681a15e26c..475cb2d4e8 100644 --- a/src/runtime/cpu/operators/CpuScale.cpp +++ b/src/runtime/cpu/operators/CpuScale.cpp @@ -86,22 +86,18 @@ void precompute_dx_dy_offsets(ITensor *dx, ITensor *dy, ITensor *offsets, float } } // namespace -CpuScale::CpuScale() - : _scale_info(InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED), _data_layout(DataLayout::UNKNOWN), _is_prepared(false) -{ -} - void CpuScale::configure(ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); ARM_COMPUTE_ERROR_THROW_ON(CpuScale::validate(src, dst, info)); - _scale_info = info; + _scale_info = info; + _is_prepared = false; // Get data layout and width/height indices - _data_layout = _scale_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : _scale_info.data_layout; - const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH); - const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT); + _data_layout = _scale_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : _scale_info.data_layout; + const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH); + const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT); // Compute the ratio between source width/height and destination width/height const bool is_align_corners_used = _scale_info.align_corners && arm_compute::scale_utils::is_align_corners_allowed_sampling_policy(_scale_info.sampling_policy); @@ -205,8 +201,8 @@ void CpuScale::prepare(ITensorPack &tensors) auto offsets = tensors.get_tensor(TensorType::ACL_INT_2); // Get data layout and width/height indices - const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH); - const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT); + const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH); + const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT); // Compute the ratio between source width/height and destination width/height const bool is_align_corners_used = _scale_info.align_corners && arm_compute::scale_utils::is_align_corners_allowed_sampling_policy(_scale_info.sampling_policy); diff --git a/src/runtime/cpu/operators/CpuScale.h b/src/runtime/cpu/operators/CpuScale.h index 90248a8d59..b83e04bc42 100644 --- a/src/runtime/cpu/operators/CpuScale.h +++ b/src/runtime/cpu/operators/CpuScale.h @@ -40,8 +40,6 @@ namespace cpu class CpuScale : public ICpuOperator { public: - /** Default Constructor */ - CpuScale(); /** Initialize the function's source, destination, interpolation type and border_mode. * * @param[in, out] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/U8/S16/F16/F32. (Written to only for @p border_mode != UNDEFINED) @@ -49,11 +47,9 @@ public: * @param[in] info @ref ScaleKernelInfo to be used for configuration */ void configure(ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info); - /** Static function to check if given info will lead to a valid configuration of @ref NEScale + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. 
Data type supported: QASYMM8/QASYMM8_SIGNED/U8/S16/F16/F32. (Written to only for @p border_mode != UNDEFINED) - * @param[in] dst Destination tensor info. Data type supported: Same as @p src. All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane. - * @param[in] info @ref ScaleKernelInfo to be used for validation + * Similar to @ref CpuScale::configure() * * @return a status */ @@ -64,10 +60,10 @@ public: void run(ITensorPack &tensors) override; private: - ScaleKernelInfo _scale_info; - DataLayout _data_layout; - bool _is_prepared; + ScaleKernelInfo _scale_info{ InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED }; + DataLayout _data_layout{ DataLayout::UNKNOWN }; + bool _is_prepared{ false }; }; } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_SCALE_H */ +#endif /* ARM_COMPUTE_CPU_SCALE_H */ diff --git a/src/runtime/cpu/operators/CpuSoftmax.h b/src/runtime/cpu/operators/CpuSoftmax.h index 38817977b3..a9ac803c09 100644 --- a/src/runtime/cpu/operators/CpuSoftmax.h +++ b/src/runtime/cpu/operators/CpuSoftmax.h @@ -57,7 +57,6 @@ template class CpuSoftmaxGeneric : public ICpuOperator { public: - /** Constructor */ CpuSoftmaxGeneric(); /** Set the input and output tensors. * @@ -69,14 +68,9 @@ public: * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0 */ void configure(const ITensorInfo *src, ITensorInfo *dst, float beta = 1.0f, int32_t axis = 0); - - /** Static function to check if given info will lead to a valid configuration of @ref CpuSoftmax + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p input - * @param[in] beta (Optional) A scaling factor for the exponent. - * @param[in] axis (Optional) The dimension in which to apply the function. E.g. for input of shape 4x5x6 and - * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0 + * Similar to @ref CpuSoftmaxGeneric::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuSub.h b/src/runtime/cpu/operators/CpuSub.h index 099ffef87e..aad01fe4dc 100644 --- a/src/runtime/cpu/operators/CpuSub.h +++ b/src/runtime/cpu/operators/CpuSub.h @@ -56,26 +56,9 @@ public: * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported. */ void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref CpuSub + /** Static function to check if given info will lead to a valid configuration * - * Valid configurations (src0,src1) -> dst : - * - * - (U8,U8) -> U8 - * - (U8,U8) -> S16 - * - (QASYMM8, QASYMM8) -> QASYMM8 - * - (QASYMM8_SIGNED, QASYMM8_SIGNED) -> QASYMM8_SIGNED - * - (S16,U8) -> S16 - * - (U8,S16) -> S16 - * - (S16,S16) -> S16 - * - (S32,S32) -> S32 - * - (F16,F16) -> F16 - * - (F32,F32) -> F32 - * - * @param[in] src0 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32 - * @param[in] src1 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32 - * @param[in] dst Output tensor. 
Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32 - * @param[in] policy Policy to use to handle overflow. Convert policy cannot be WRAP if datatype is quantized. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported. + * Similar to @ref CpuSub::configure() * * @return a status */ diff --git a/src/runtime/cpu/operators/CpuTranspose.h b/src/runtime/cpu/operators/CpuTranspose.h index c0232ddab2..0735924839 100644 --- a/src/runtime/cpu/operators/CpuTranspose.h +++ b/src/runtime/cpu/operators/CpuTranspose.h @@ -34,18 +34,15 @@ namespace cpu class CpuTranspose : public ICpuOperator { public: - /** Constructor */ - CpuTranspose() = default; /** Configure operator for a given list of arguments * * @param[in] src Source tensor to permute. Data types supported: All * @param[out] dst Destintation tensor. Data types supported: Same as @p src */ void configure(const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuTranspose + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor to permute. Data types supported: All - * @param[in] dst Destination tensor. Data types supported: Same as @p dst + * Similar to @ref CpuTranspose::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClActivation.h b/src/runtime/gpu/cl/operators/ClActivation.h index 235b826b87..82ef8ac63a 100644 --- a/src/runtime/gpu/cl/operators/ClActivation.h +++ b/src/runtime/gpu/cl/operators/ClActivation.h @@ -35,8 +35,6 @@ namespace opencl class ClActivation : public IClOperator { public: - /** Constructor */ - ClActivation() = default; /** Configure operator for a given list of arguments * * @param[in] compile_context The compile context to be used. @@ -45,11 +43,9 @@ public: * @param[in] activation_info Activation layer parameters. */ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const ActivationLayerInfo &activation_info); - /** Static function to check if given info will lead to a valid configuration of @ref ClActivation + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32. - * @param[in] dst Destination tensor info. Data type supported: same as @p src - * @param[in] act_info Activation layer information. + * Similar to @ref ClActivation::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClAdd.h b/src/runtime/gpu/cl/operators/ClAdd.h index f751d8dc83..7b84a767d6 100644 --- a/src/runtime/gpu/cl/operators/ClAdd.h +++ b/src/runtime/gpu/cl/operators/ClAdd.h @@ -39,8 +39,6 @@ namespace opencl class ClAdd : public IClOperator { public: - /** Default Constructor */ - ClAdd() = default; /** Configure function for a given list of arguments. 
* * Valid configurations (src1,src2) -> dst : @@ -68,27 +66,9 @@ public: */ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref ClAdd + /** Static function to check if given info will lead to a valid configuration * - * Valid configurations (src1,src2) -> dst : - * - * - (U8,U8) -> U8 - * - (U8,U8) -> S16 - * - (S16,U8) -> S16 - * - (U8,S16) -> S16 - * - (S16,S16) -> S16 - * - (S32,S32) -> S32 - * - (F16,F16) -> F16 - * - (F32,F32) -> F32 - * - (QASYMM8,QASYMM8) -> QASYMM8 - * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED - * - (QSYMM16,QSYMM16) -> QSYMM16 - * - * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. - * @param[in] src2 Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. - * @param[in] dst Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. - * @param[in] policy Policy to use to handle overflow. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * Similar to @ref ClAdd::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClCast.h b/src/runtime/gpu/cl/operators/ClCast.h index 69e028debd..107eb2bfe9 100644 --- a/src/runtime/gpu/cl/operators/ClCast.h +++ b/src/runtime/gpu/cl/operators/ClCast.h @@ -35,8 +35,6 @@ namespace opencl class ClCast : public IClOperator { public: - /** Constructor */ - ClCast() = default; /** Configure operator for a given list of arguments * * @note Input data type must be different than output data type. diff --git a/src/runtime/gpu/cl/operators/ClConcatenate.cpp b/src/runtime/gpu/cl/operators/ClConcatenate.cpp index 4385fcfaed..d3c05eae78 100644 --- a/src/runtime/gpu/cl/operators/ClConcatenate.cpp +++ b/src/runtime/gpu/cl/operators/ClConcatenate.cpp @@ -42,13 +42,6 @@ namespace arm_compute { namespace opencl { -ClConcatenate::ClConcatenate() - : _concat_kernels(), - _num_inputs(0), - _axis(Window::DimX) -{ -} - void ClConcatenate::configure(const CLCompileContext &compile_context, const std::vector &src_vector, ITensorInfo *dst, size_t axis) { ARM_COMPUTE_ERROR_ON(dst == nullptr); diff --git a/src/runtime/gpu/cl/operators/ClConcatenate.h b/src/runtime/gpu/cl/operators/ClConcatenate.h index 0d960a605c..fb1235b9be 100644 --- a/src/runtime/gpu/cl/operators/ClConcatenate.h +++ b/src/runtime/gpu/cl/operators/ClConcatenate.h @@ -44,8 +44,7 @@ namespace opencl class ClConcatenate : public IClOperator { public: - /** Default constructor */ - ClConcatenate(); + ClConcatenate() = default; /** Initialise the kernel's inputs vector and dst. * * @note Input and dst tensor dimensions preconditions defer depending on the concatenation axis. @@ -59,15 +58,9 @@ public: * @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3. */ void configure(const ClCompileContext &compile_context, const std::vector &src_vector, ITensorInfo *dst, size_t axis); - /** Static function to check if given info will lead to a valid configuration of @ref ClConcatenate + /** Static function to check if given info will lead to a valid configuration * - * @note Input and dst tensor dimensions preconditions defer depending on the concatenation axis. 
- * @note Preconditions can be found respectively at @ref kernels::ClWidthConcatenateKernel, - * @ref kernels::ClHeightConcatenateKernel and @ref kernels::ClDepthConcatenateKernel. - * - * @param[in] src_vector The vectors containing all the tensors info to concatenate. Data types supported: All - * @param[in] dst Destination tensor info. Data types supported: same as @p src_vector. - * @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3. + * Similar to @ref ClConcatenate::configure() * * @return a status */ @@ -77,9 +70,9 @@ public: void run(ITensorPack &tensors) override; private: - std::vector> _concat_kernels; - unsigned int _num_inputs; - unsigned int _axis; + std::vector> _concat_kernels{}; + unsigned int _num_inputs{ 0 }; + unsigned int _axis{ 0 }; }; } // namespace opencl } // namespace arm_compute diff --git a/src/runtime/gpu/cl/operators/ClConvertFullyConnectedWeights.h b/src/runtime/gpu/cl/operators/ClConvertFullyConnectedWeights.h index efedc2fcb7..7ea35c5a8a 100644 --- a/src/runtime/gpu/cl/operators/ClConvertFullyConnectedWeights.h +++ b/src/runtime/gpu/cl/operators/ClConvertFullyConnectedWeights.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CL_CONVERTFULLYCONNECTEDWEIGHTS_H -#define ARM_COMPUTE_CL_CONVERTFULLYCONNECTEDWEIGHTS_H +#ifndef ARM_COMPUTE_CL_CONVERT_FULLYCONNECTED_WEIGHTS_H +#define ARM_COMPUTE_CL_CONVERT_FULLYCONNECTED_WEIGHTS_H #include "src/core/gpu/cl/ClCompileContext.h" #include "src/runtime/gpu/cl/IClOperator.h" @@ -35,8 +35,6 @@ namespace opencl class ClConvertFullyConnectedWeights : public IClOperator { public: - /** Constructor */ - ClConvertFullyConnectedWeights() = default; /** Initialise the kernel's inputs and outputs * * @param[in] compile_context The compile context to be used. @@ -46,12 +44,9 @@ public: * @param[in] data_layout The data layout the weights have been trained in. */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout); - /** Static function to check if given info will lead to a valid configuration of @ref kernels::ClConvertFullyConnectedWeightsKernel. + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First tensor src info. Data types supported: All. - * @param[in] dst Output tensor info. Data types supported: same as @p src. - * @param[in] original_src_shape Shape of the original src tensor (the one entering fully connected layer). - * @param[in] data_layout The data layout the weights have been trained in. + * Similar to @ref ClConvertFullyConnectedWeights::configure() * * @return a status */ @@ -59,4 +54,4 @@ }; } // namespace opencl } // namespace arm_compute -#endif /* ARM_COMPUTE_CL_CONVERTFULLYCONNECTEDWEIGHTS_H */ +#endif /* ARM_COMPUTE_CL_CONVERT_FULLYCONNECTED_WEIGHTS_H */ diff --git a/src/runtime/gpu/cl/operators/ClCopy.h b/src/runtime/gpu/cl/operators/ClCopy.h index 0b99676f65..e8ea8125eb 100644 --- a/src/runtime/gpu/cl/operators/ClCopy.h +++ b/src/runtime/gpu/cl/operators/ClCopy.h @@ -36,8 +36,6 @@ namespace opencl class ClCopy : public IClOperator { public: - /** Constructor */ - ClCopy() = default; /** Initialise the function's source and destination. * * @param[in] compile_context The compile context to be used.
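A short aside on the member-initialisation change visible in the ClConcatenate and CpuScale hunks above: the hand-written default constructors and their initializer lists are dropped in favour of in-class default member initializers, so the compiler-generated special members are sufficient. A minimal sketch of that pattern follows; the class and member names are illustrative only and are not taken from the library.

    class ExampleOperator
    {
    public:
        ExampleOperator() = default; // often omitted entirely, as in several operators above

    private:
        // In-class initializers replace the removed constructor's initializer list.
        std::vector<int> _kernels{};          // was initialised as _kernels() in the removed constructor
        unsigned int     _num_inputs{ 0 };    // was _num_inputs(0)
        bool             _is_prepared{ false }; // was _is_prepared(false)
    };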
@@ -47,11 +45,9 @@ public: * */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, Window *dst_window = nullptr); - /** Static function to check if given info will lead to a valid configuration of @ref kernels::ClCopyKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: All. - * @param[in] dst Output tensor info. Data types supported: Same as @p src. - * @param[in] dst_window (Optional) Window to be used in case only copying into part of a tensor. Default is nullptr. + * Similar to @ref ClCopy::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClCrop.h b/src/runtime/gpu/cl/operators/ClCrop.h index acfbf14742..cca69d6d77 100644 --- a/src/runtime/gpu/cl/operators/ClCrop.h +++ b/src/runtime/gpu/cl/operators/ClCrop.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CL_COPY_H -#define ARM_COMPUTE_CL_COPY_H +#ifndef ARM_COMPUTE_CL_CROP_H +#define ARM_COMPUTE_CL_CROP_H #include "arm_compute/core/Window.h" #include "src/core/gpu/cl/ClCompileContext.h" @@ -36,8 +36,6 @@ namespace opencl class ClCrop : public IClOperator { public: - /** Constructor */ - ClCrop() = default; /** Initialise the function's source and destination. * * @note Supported tensor rank: up to 4 @@ -53,22 +51,15 @@ public: */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0, Window *dst_window = nullptr); - - /** Static function to check if given info will lead to a valid configuration of @ref kernels::ClCropKernel + /** Static function to check if given info will lead to a valid configuration * - * @note Supported tensor rank: up to 4 + * Similar to @ref ClCrop::configure() * - * @param[in] src Source tensor info. Data type supported: All. Data layouts supported: NHWC. - * @param[in] dst Destination tensor info. Data type supported: F32 - * @param[in] start Coordinates of where to start cropping the image. - * @param[in] end Coordinates of where to end cropping the image. - * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p src. - * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0. - * @param[in] dst_window Output window to be used in case cropped image is being copied into a tensor. Default is nullptr. + * @return a status */ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0, Window *dst_window = nullptr); }; } // namespace opencl } // namespace arm_compute -#endif /* ARM_COMPUTE_CL_COPY_H */ +#endif /* ARM_COMPUTE_CL_CROP_H */ diff --git a/src/runtime/gpu/cl/operators/ClDequantize.h b/src/runtime/gpu/cl/operators/ClDequantize.h index 47fad3eeee..5bcdcb2113 100644 --- a/src/runtime/gpu/cl/operators/ClDequantize.h +++ b/src/runtime/gpu/cl/operators/ClDequantize.h @@ -35,8 +35,6 @@ namespace opencl class ClDequantize : public IClOperator { public: - /** Constructor */ - ClDequantize() = default; /** Set the input and output tensors. * * @param[in] compile_context The compile context to be used. 
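To make the redirected documentation concrete, here is a hedged usage sketch of the validate()/configure() pairing for one of the CL operators above, using ClCopy. The tensor shapes and the way the compile context is obtained are illustrative assumptions, not part of this change.

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "src/runtime/gpu/cl/operators/ClCopy.h"

    using namespace arm_compute;

    void copy_validate_then_configure()
    {
        // Assumes the CL backend has already been initialised,
        // e.g. via CLScheduler::get().default_init().
        TensorInfo src(TensorShape(32U, 32U), 1, DataType::F32);
        TensorInfo dst(TensorShape(32U, 32U), 1, DataType::F32);

        // validate() is documented as "Similar to @ref ClCopy::configure()":
        // it takes the same tensor infos but returns a Status instead of configuring.
        ARM_COMPUTE_ERROR_THROW_ON(opencl::ClCopy::validate(&src, &dst));

        opencl::ClCopy copy;
        copy.configure(CLKernelLibrary::get().get_compile_context(), &src, &dst);
    }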
diff --git a/src/runtime/gpu/cl/operators/ClDirectConv2d.h b/src/runtime/gpu/cl/operators/ClDirectConv2d.h index e069733fab..a2785b52e3 100644 --- a/src/runtime/gpu/cl/operators/ClDirectConv2d.h +++ b/src/runtime/gpu/cl/operators/ClDirectConv2d.h @@ -42,7 +42,6 @@ namespace opencl class ClDirectConv2d : public IClOperator { public: - /** Constructor */ ClDirectConv2d() = default; /** Set the src and dst tensors. * diff --git a/src/runtime/gpu/cl/operators/ClElementwiseOperations.h b/src/runtime/gpu/cl/operators/ClElementwiseOperations.h index b9ab1405c8..c01b107d97 100644 --- a/src/runtime/gpu/cl/operators/ClElementwiseOperations.h +++ b/src/runtime/gpu/cl/operators/ClElementwiseOperations.h @@ -39,8 +39,6 @@ namespace opencl class ClElementwiseDivision : public IClOperator { public: - /** Default Constructor */ - ClElementwiseDivision() = default; /** Configure function for a given list of arguments. * * @param[in] compile_context The compile context to be used. @@ -50,12 +48,9 @@ public: * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref ClElementwiseDivision + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src1 First source tensor info. Data types supported: F16/F32. - * @param[in] src2 Second source tensor info. Data types supported: same as @p src1. - * @param[in] dst Destination tensor info. Data types supported: same as @p src1. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * Similar to @ref ClElementwiseDivision::configure() * * @return a status */ @@ -70,8 +65,6 @@ public: class ClElementwiseMax : public IClOperator { public: - /** Default Constructor */ - ClElementwiseMax() = default; /** Configure function for a given list of arguments. * * @param[in] compile_context The compile context to be used. @@ -81,12 +74,9 @@ public: * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for max + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. - * @param[in] src2 Second source tensor info. Data types supported: same as @p src1. - * @param[in] dst Destination tensor info. Data types supported: same as @p src1. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * Similar to @ref ClElementwiseMax::configure() * * @return a status */ @@ -101,8 +91,6 @@ public: class ClElementwiseMin : public IClOperator { public: - /** Default Constructor */ - ClElementwiseMin() = default; /** Configure function for a given list of arguments. * * @param[in] compile_context The compile context to be used. @@ -112,12 +100,9 @@ public: * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 
*/ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for min + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. - * @param[in] src2 Second source tensor info. Data types supported: same as @p src1. - * @param[in] dst Destination tensor info. Data types supported: same as @p src1. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * Similar to @ref ClElementwiseMin::configure() * * @return a status */ @@ -132,8 +117,6 @@ public: class ClElementwiseSquaredDiff : public IClOperator { public: - /** Default Constructor */ - ClElementwiseSquaredDiff() = default; /** Configure function for a given list of arguments. * * @param[in] compile_context The compile context to be used. @@ -143,12 +126,9 @@ public: * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for squared difference + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32. - * @param[in] src2 Second source tensor info. Data types supported: same as @p src1. - * @param[in] dst Destination tensor info. Data types supported: same as @p src1. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * Similar to @ref ClElementwiseSquaredDiff::configure() * * @return a status */ @@ -163,8 +143,6 @@ public: class ClElementwisePower : public IClOperator { public: - /** Default Constructor */ - ClElementwisePower() = default; /** Configure function for a given list of arguments. * * @param[in] compile_context The compile context to be used. @@ -174,12 +152,9 @@ public: * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for power + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src1 First source tensor info. Data types supported: F16/F32. - * @param[in] src2 Second source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: F16/F32. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 
+ * Similar to @ref ClElementwisePower::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClElementwiseUnary.h b/src/runtime/gpu/cl/operators/ClElementwiseUnary.h index b40e3e9a3b..b9acf6f5b8 100644 --- a/src/runtime/gpu/cl/operators/ClElementwiseUnary.h +++ b/src/runtime/gpu/cl/operators/ClElementwiseUnary.h @@ -35,8 +35,6 @@ namespace opencl class ClRsqrt : public IClOperator { public: - /** Constructor */ - ClRsqrt() = default; /** Initialize the function * * @param[in] compile_context The compile context to be used. @@ -44,10 +42,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src. */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClRsqrt + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to @ref ClRsqrt::configure() * * @return a status */ @@ -58,8 +55,6 @@ public: class ClExp : public IClOperator { public: - /** Constructor */ - ClExp() = default; /** Initialize the function * * @param[in] compile_context The compile context to be used. @@ -67,10 +62,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src. */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClExp + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to @ref ClExp::configure() * * @return a status */ @@ -81,8 +75,6 @@ public: class ClNeg : public IClOperator { public: - /** Constructor */ - ClNeg() = default; /** Initialize the function * * @param[in] compile_context The compile context to be used. @@ -90,10 +82,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src. */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClNeg + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to @ref ClNeg::configure() * * @return a status */ @@ -104,8 +95,6 @@ public: class ClSin : public IClOperator { public: - /** Constructor */ - ClSin() = default; /** Initialize the function * * @param[in] compile_context The compile context to be used. @@ -113,10 +102,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src. */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClSin + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. 
+ * Similar to @ref ClSin::configure() * * @return a status */ @@ -127,8 +115,6 @@ public: class ClLog : public IClOperator { public: - /** Constructor */ - ClLog() = default; /** Initialize the function * * @param[in] compile_context The compile context to be used. @@ -136,10 +122,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src. */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClLog + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to @ref ClLog::configure() * * @return a status */ @@ -157,10 +142,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src. */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClAbs + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to @ref ClAbs::configure() * * @return a status */ @@ -178,10 +162,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: same as @p src. */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClRound + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to @ref ClRound::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClFill.h b/src/runtime/gpu/cl/operators/ClFill.h index e632d88546..cc79b915a7 100644 --- a/src/runtime/gpu/cl/operators/ClFill.h +++ b/src/runtime/gpu/cl/operators/ClFill.h @@ -36,8 +36,6 @@ namespace opencl class ClFill : public IClOperator { public: - /** Constructor */ - ClFill() = default; /** Initialise the kernel's tensor and filling value * * @param[in] compile_context The compile context to be used. @@ -46,11 +44,9 @@ public: * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr. */ void configure(const CLCompileContext &compile_context, ITensorInfo *tensor, const PixelValue &constant_value, Window *window = nullptr); - /** Static function to check if given info will lead to a valid configuration of @ref kernels::ClFillKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] tensor Source tensor info. Data types supported: All. - * @param[in] constant_value The value used to fill the planes of the tensor. - * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr. 
+ * Similar to ClFill::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClFlatten.h b/src/runtime/gpu/cl/operators/ClFlatten.h index 20ad06ee57..8bd619b518 100644 --- a/src/runtime/gpu/cl/operators/ClFlatten.h +++ b/src/runtime/gpu/cl/operators/ClFlatten.h @@ -35,8 +35,6 @@ namespace opencl class ClFlatten : public IClOperator { public: - /** Constructor */ - ClFlatten() = default; /** Configure operator for a given list of arguments * * Valid data layouts: diff --git a/src/runtime/gpu/cl/operators/ClFloor.h b/src/runtime/gpu/cl/operators/ClFloor.h index f54eef9140..90bdee6c7e 100644 --- a/src/runtime/gpu/cl/operators/ClFloor.h +++ b/src/runtime/gpu/cl/operators/ClFloor.h @@ -35,8 +35,6 @@ namespace opencl class ClFloor : public IClOperator { public: - /** Constructor */ - ClFloor() = default; /** Configure operator for a given list of arguments * * @param[in] compile_context The compile context to be used. @@ -44,10 +42,9 @@ public: * @param[in] dst Destination tensor info. Data type supported: same as @p src */ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClFloor + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: F16/F32. - * @param[in] dst Destination tensor info. Data type supported: same as @p src + * Similar to ClFloor::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClLogicalNot.h b/src/runtime/gpu/cl/operators/ClLogicalNot.h index 25ddf564b5..782ac0848f 100644 --- a/src/runtime/gpu/cl/operators/ClLogicalNot.h +++ b/src/runtime/gpu/cl/operators/ClLogicalNot.h @@ -35,8 +35,6 @@ namespace opencl class ClLogicalNot : public IClOperator { public: - /** Constructor */ - ClLogicalNot() = default; /** Configure operator for a given list of arguments * * @param[in] compile_context The compile context to be used. @@ -46,8 +44,7 @@ public: void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Soure tensor info. Data types supported: U8. - * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * Similar to ClLogicalNot::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClMul.h b/src/runtime/gpu/cl/operators/ClMul.h index 4a662b3276..29d5885a1c 100644 --- a/src/runtime/gpu/cl/operators/ClMul.h +++ b/src/runtime/gpu/cl/operators/ClMul.h @@ -35,8 +35,6 @@ namespace opencl class ClMul : public IClOperator { public: - /** Default Constructor */ - ClMul() = default; /** Initialise the kernel's sources, dst and convertion policy. * * Valid configurations (src1,src2) -> Output : @@ -81,8 +79,6 @@ public: class ClComplexMul : public IClOperator { public: - /** Default Constructor */ - ClComplexMul() = default; /** Initialise the kernel's sources, dst. * * @param[in] compile_context The compile context to be used. diff --git a/src/runtime/gpu/cl/operators/ClPRelu.h b/src/runtime/gpu/cl/operators/ClPRelu.h index 70202aeb81..3a02030635 100644 --- a/src/runtime/gpu/cl/operators/ClPRelu.h +++ b/src/runtime/gpu/cl/operators/ClPRelu.h @@ -38,8 +38,6 @@ namespace opencl class ClPRelu : public IClOperator { public: - /** Default constructor */ - ClPRelu() = default; /** Set the input and output tensor. 
* * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place @@ -50,11 +48,9 @@ public: * @param[out] output Destination tensor. Data type supported: same as @p input */ void configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *alpha, ITensorInfo *output); - /** Static function to check if given info will lead to a valid configuration of @ref arm_compute::opencl::kernels::ClArithmeticKernel for PRELU + /** Static function to check if given info will lead to a valid configuration * - * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[in] alpha PRelu layer parameters. Data types supported: same of @p input. - * @param[in] output Destination tensor info. Data type supported: same as @p input + * Similar to ClPRelu::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClPermute.h b/src/runtime/gpu/cl/operators/ClPermute.h index 20e7a32428..867aba010d 100644 --- a/src/runtime/gpu/cl/operators/ClPermute.h +++ b/src/runtime/gpu/cl/operators/ClPermute.h @@ -35,8 +35,6 @@ namespace opencl class ClPermute : public IClOperator { public: - /** Constructor */ - ClPermute() = default; /** Initialise the kernel's inputs and outputs and permute vector * * @note Arbitrary permutation vectors are supported with rank not greater than 4 @@ -47,13 +45,9 @@ public: * @param[in] perm Permutation vector */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm); - /** Static function to check if given info will lead to a valid configuration of @ref kernels::ClPermuteKernel. + /** Static function to check if given info will lead to a valid configuration * - * @note Arbitrary permutation vectors are supported with rank not greater than 4 - * - * @param[in] src First tensor src info. Data types supported: All. - * @param[in] dst Output tensor info. Data types supported: same as @p src. - * @param[in] perm Permutation vector + * Similar to ClPermute::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClQuantize.h b/src/runtime/gpu/cl/operators/ClQuantize.h index 0b6d2c8cbe..b15d389cca 100644 --- a/src/runtime/gpu/cl/operators/ClQuantize.h +++ b/src/runtime/gpu/cl/operators/ClQuantize.h @@ -35,8 +35,6 @@ namespace opencl class ClQuantize : public IClOperator { public: - /** Constructor */ - ClQuantize() = default; /** Set the input and output tensors. * * @param[in] compile_context The compile context to be used. diff --git a/src/runtime/gpu/cl/operators/ClReshape.h b/src/runtime/gpu/cl/operators/ClReshape.h index 8cccc5776c..b3d9267be4 100644 --- a/src/runtime/gpu/cl/operators/ClReshape.h +++ b/src/runtime/gpu/cl/operators/ClReshape.h @@ -35,8 +35,6 @@ namespace opencl class ClReshape : public IClOperator { public: - /** Constructor */ - ClReshape() = default; /** Initialise the kernel's inputs and outputs * * @param[in] compile_context The compile context to be used. @@ -44,11 +42,9 @@ public: * @param[out] output Output info. Data type supported: Same as @p input */ void configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output); - - /** Static function to check if given info will lead to a valid configuration of @ref kernels::ClReshapeKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] input Input tensor info. 
Data type supported: All - * @param[in] output Output tensor info. Data type supported: Same as @p input + * Similar to ClReshape::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClScale.h b/src/runtime/gpu/cl/operators/ClScale.h index 6eccb59be8..905c43a41c 100644 --- a/src/runtime/gpu/cl/operators/ClScale.h +++ b/src/runtime/gpu/cl/operators/ClScale.h @@ -51,13 +51,9 @@ public: * @param[in] info @ref ScaleKernelInfo descriptor to be used to configure */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info); - - /** Static function to check if given info will lead to a valid configuration of @ref ClScale + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32. - * @param[in] dst Output tensor info. Data type supported: Same as @p src - * All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane. - * @param[in] info @ref ScaleKernelInfo descriptor to be used to validate + * Similar to ClScale::configure() * * @return a status */ @@ -71,4 +67,4 @@ protected: }; } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CLSCALE_H */ +#endif /* ARM_COMPUTE_CLSCALE_H */ diff --git a/src/runtime/gpu/cl/operators/ClSoftmax.h b/src/runtime/gpu/cl/operators/ClSoftmax.h index f19a51fc5e..c85b193d9d 100644 --- a/src/runtime/gpu/cl/operators/ClSoftmax.h +++ b/src/runtime/gpu/cl/operators/ClSoftmax.h @@ -51,15 +51,13 @@ public: * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax * @param[out] dst Destination tensor info. Data types supported: same as @p src * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo. - * */ void configure(const CLCompileContext &compile_context, const ITensorInfo &src, ITensorInfo &dst, const SoftmaxKernelInfo &info); - /** Static function to check if the given info will lead to a valid configuration + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax - * @param[out] dst Destination tensor info. Data types supported: same as @p src - * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo. + * Similar to ClSoftmax::configure() * + * @return a status */ static Status validate(const ITensorInfo &src, const ITensorInfo &dst, const SoftmaxKernelInfo &info); // Inherited methods overridden: diff --git a/src/runtime/gpu/cl/operators/ClSub.h b/src/runtime/gpu/cl/operators/ClSub.h index bcad84d583..2dac11c00e 100644 --- a/src/runtime/gpu/cl/operators/ClSub.h +++ b/src/runtime/gpu/cl/operators/ClSub.h @@ -39,8 +39,6 @@ namespace opencl class ClSub : public IClOperator { public: - /** Default Constructor */ - ClSub() = default; /** Configure function for a given list of arguments. 
* * Valid configurations (src1,src2) -> dst : @@ -68,27 +66,9 @@ public: */ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref ClSub + /** Static function to check if given info will lead to a valid configuration * - * Valid configurations (src1,src2) -> dst : - * - * - (U8,U8) -> U8 - * - (U8,U8) -> S16 - * - (S16,U8) -> S16 - * - (U8,S16) -> S16 - * - (S16,S16) -> S16 - * - (S32,S32) -> S32 - * - (F16,F16) -> F16 - * - (F32,F32) -> F32 - * - (QASYMM8,QASYMM8) -> QASYMM8 - * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED - * - (QSYMM16,QSYMM16) -> QSYMM16 - * - * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. - * @param[in] src2 Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. - * @param[in] dst Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. - * @param[in] policy Policy to use to handle overflow. - * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * Similar to @ref ClSub::configure() * * @return a status */ diff --git a/src/runtime/gpu/cl/operators/ClTranspose.h b/src/runtime/gpu/cl/operators/ClTranspose.h index d898f677ca..dcd80820bb 100644 --- a/src/runtime/gpu/cl/operators/ClTranspose.h +++ b/src/runtime/gpu/cl/operators/ClTranspose.h @@ -35,8 +35,6 @@ namespace opencl class ClTranspose : public IClOperator { public: - /** Constructor */ - ClTranspose() = default; /** Initialise the kernel's inputs and outputs * * @param[in] compile_context The compile context to be used. @@ -44,10 +42,9 @@ public: * @param[in] dst The dst tensor info. Data types supported: Same as @p src */ void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref kernels::ClTransposeKernel. + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src First tensor src info. Data types supported: All. - * @param[in] dst Output tensor info. Data types supported: same as @p src. + * Similar to ClTranspose::configure() * * @return a status */ -- cgit v1.2.1
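Taken together, the operators touched by this patch share one stateless pattern: validate the tensor infos, configure once, then hand the actual tensors to run() through an ITensorPack. A hedged end-to-end sketch on the CPU side, using CpuFloor; the shapes and allocation steps are illustrative assumptions.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/experimental/Types.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/runtime/cpu/operators/CpuFloor.h"

    using namespace arm_compute;

    void floor_validate_configure_run()
    {
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        src.allocator()->allocate();
        dst.allocator()->allocate();

        // validate() is now documented as "Similar to @ref CpuFloor::configure()".
        ARM_COMPUTE_ERROR_THROW_ON(cpu::CpuFloor::validate(src.info(), dst.info()));

        cpu::CpuFloor floor_op;
        floor_op.configure(src.info(), dst.info());

        // The operator holds no tensors of its own; they are supplied at run time via a pack.
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, &src);
        pack.add_tensor(TensorType::ACL_DST, &dst);
        floor_op.run(pack);
    }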