author     Georgios Pinitas <georgios.pinitas@arm.com>  2021-07-02 09:01:49 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2021-07-02 15:47:11 +0000
commit     2eb5d16b839cbc28c6cb7f0de7a0bf15290b425a (patch)
tree       523d495c3a4c07d87b337c45a81afa06c9b1f495 /src/runtime/cpu
parent     4dfc5538948c196def6d2e3305fe8051a5df3f15 (diff)
download   ComputeLibrary-2eb5d16b839cbc28c6cb7f0de7a0bf15290b425a.tar.gz
Align kernel/operator header layout
- Redirect validate documentation to configure
- Align header names
- Align class layout

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ia40f67383826a66e9f9a33745d66805551e31a3a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5897
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
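For reference, the layout that the headers below converge on looks roughly like the following. This is an illustrative sketch only, not code from the patch: ExampleOp and its member are hypothetical stand-ins, while ICpuOperator, ITensorInfo, ITensorPack and Status are the library types already used throughout the diff.

#include "arm_compute/core/ITensorInfo.h"
#include "src/runtime/cpu/ICpuOperator.h"

namespace arm_compute
{
namespace cpu
{
/** Hypothetical operator, shown only to illustrate the aligned layout */
class ExampleOp : public ICpuOperator
{
public:
    // No hand-written default constructor or destructor: the implicit ones
    // are used, and state carries in-class initialisers instead of a
    // constructor initialiser list.
    /** Configure operator for a given list of arguments
     *
     * @param[in]  src Source tensor info. Data type supported: All
     * @param[out] dst Destination tensor info. Data type supported: Same as @p src
     */
    void configure(const ITensorInfo *src, ITensorInfo *dst);
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to @ref ExampleOp::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *src, const ITensorInfo *dst);

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    bool _is_prepared{ false };
};
} // namespace cpu
} // namespace arm_compute

In the changes below, a constructor or destructor that did nothing is removed outright (CpuActivation, CpuCast, CpuFill and similar); one that only initialised members has those members gain brace initialisers, and the declaration either becomes = default (CpuConcatenate, CpuDepthwiseConv2d) or disappears (CpuScale).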
Diffstat (limited to 'src/runtime/cpu')
-rw-r--r--  src/runtime/cpu/operators/CpuActivation.h | 8
-rw-r--r--  src/runtime/cpu/operators/CpuAdd.h | 10
-rw-r--r--  src/runtime/cpu/operators/CpuCast.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuConcatenate.cpp | 5
-rw-r--r--  src/runtime/cpu/operators/CpuConcatenate.h | 19
-rw-r--r--  src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h | 15
-rw-r--r--  src/runtime/cpu/operators/CpuCopy.h | 8
-rw-r--r--  src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp | 17
-rw-r--r--  src/runtime/cpu/operators/CpuDepthwiseConv2d.h | 21
-rw-r--r--  src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h | 3
-rw-r--r--  src/runtime/cpu/operators/CpuDequantize.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuDirectConv2d.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuElementwise.h | 33
-rw-r--r--  src/runtime/cpu/operators/CpuElementwiseUnary.h | 4
-rw-r--r--  src/runtime/cpu/operators/CpuFill.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuFlatten.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuFloor.h | 7
-rw-r--r--  src/runtime/cpu/operators/CpuGemmDirectConv2d.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuGemmLowpOutputStage.h | 10
-rw-r--r--  src/runtime/cpu/operators/CpuMul.h | 4
-rw-r--r--  src/runtime/cpu/operators/CpuPermute.h | 10
-rw-r--r--  src/runtime/cpu/operators/CpuPool2d.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuQuantize.h | 2
-rw-r--r--  src/runtime/cpu/operators/CpuReshape.h | 8
-rw-r--r--  src/runtime/cpu/operators/CpuScale.cpp | 18
-rw-r--r--  src/runtime/cpu/operators/CpuScale.h | 16
-rw-r--r--  src/runtime/cpu/operators/CpuSoftmax.h | 10
-rw-r--r--  src/runtime/cpu/operators/CpuSub.h | 21
-rw-r--r--  src/runtime/cpu/operators/CpuTranspose.h | 7
29 files changed, 67 insertions, 203 deletions
diff --git a/src/runtime/cpu/operators/CpuActivation.h b/src/runtime/cpu/operators/CpuActivation.h
index 0ae16bf958..ded4a37edb 100644
--- a/src/runtime/cpu/operators/CpuActivation.h
+++ b/src/runtime/cpu/operators/CpuActivation.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuActivation : public ICpuOperator
{
public:
- /** Constructor */
- CpuActivation() = default;
/** Configure operator for a given list of arguments
*
* @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
@@ -43,11 +41,9 @@ public:
* @param[in] activation_info Activation layer parameters.
*/
void configure(const ITensorInfo *input, ITensorInfo *output, const ActivationLayerInfo &activation_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuActivation
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p src
- * @param[in] act_info Activation layer information.
+ * Similar to @ref CpuActivation::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuAdd.h b/src/runtime/cpu/operators/CpuAdd.h
index 8ae7833f01..febb79e4cd 100644
--- a/src/runtime/cpu/operators/CpuAdd.h
+++ b/src/runtime/cpu/operators/CpuAdd.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuAdd : public ICpuOperator
{
public:
- /** Constructor */
- CpuAdd() = default;
/** Initialise the kernel's input, dst and border mode.
*
* Valid configurations (src0,src1) -> dst :
@@ -60,13 +58,9 @@ public:
*
*/
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration of @ref CpuAdd
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src0 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32
- * @param[in] src1 Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32
- * @param[in] dst The dst tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32.
- * @param[in] policy Overflow policy.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * Similar to @ref CpuAdd::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuCast.h b/src/runtime/cpu/operators/CpuCast.h
index 2aea2d2b09..26f5740b86 100644
--- a/src/runtime/cpu/operators/CpuCast.h
+++ b/src/runtime/cpu/operators/CpuCast.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuCast : public ICpuOperator
{
public:
- /** Constructor */
- CpuCast() = default;
/** Configure operator for a given list of arguments
*
* Input data type must be different than output data type.
diff --git a/src/runtime/cpu/operators/CpuConcatenate.cpp b/src/runtime/cpu/operators/CpuConcatenate.cpp
index 23eb3fceab..bb475b790e 100644
--- a/src/runtime/cpu/operators/CpuConcatenate.cpp
+++ b/src/runtime/cpu/operators/CpuConcatenate.cpp
@@ -42,11 +42,6 @@ namespace arm_compute
{
namespace cpu
{
-CpuConcatenate::CpuConcatenate()
- : _concat_kernels(), _num_srcs(0), _axis(0)
-{
-}
-
void CpuConcatenate::configure(const std::vector<const ITensorInfo *> &srcs_vector, ITensorInfo *dst, size_t axis)
{
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/runtime/cpu/operators/CpuConcatenate.h b/src/runtime/cpu/operators/CpuConcatenate.h
index d2af3e2ad2..55eab54996 100644
--- a/src/runtime/cpu/operators/CpuConcatenate.h
+++ b/src/runtime/cpu/operators/CpuConcatenate.h
@@ -43,8 +43,7 @@ namespace cpu
class CpuConcatenate : public ICpuOperator
{
public:
- /** Constructor */
- CpuConcatenate();
+ CpuConcatenate() = default;
/** Configure operator for a given list of arguments
*
* @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
@@ -56,15 +55,9 @@ public:
* @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3.
*/
void configure(const std::vector<const ITensorInfo *> &srcs_vector, ITensorInfo *dst, size_t axis);
- /** Static function to check if given info will lead to a valid configuration of @ref NEConcatenateLayer
+ /** Static function to check if given info will lead to a valid configuration
*
- * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
- * @note Preconditions can be found respectively at @ref kernels::CpuConcatenateWidthKernel, @ref kernels::CpuConcatenateHeightKernel,
- * @ref kernels::CpuConcatenateDepthKernel and @ref kernels::CpuConcatenateBatchKernel.
- *
- * @param[in] srcs_vector The vectors containing all the tensors info to concatenate. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] dst Output tensor info. Data types supported: Same as @p srcs_vector.
- * @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3.
+ * Similar to @ref CpuConcatenate::configure()
*
* @return a status
*/
@@ -74,9 +67,9 @@ public:
void run(ITensorPack &tensors) override;
private:
- std::vector<std::unique_ptr<ICpuKernel>> _concat_kernels;
- unsigned int _num_srcs;
- unsigned int _axis;
+ std::vector<std::unique_ptr<ICpuKernel>> _concat_kernels{};
+ unsigned int _num_srcs{ 0 };
+ unsigned int _axis{ 0 };
};
} // namespace cpu
} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h b/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h
index 3f1ddf1dbe..53ee17f6d1 100644
--- a/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h
+++ b/src/runtime/cpu/operators/CpuConvertFullyConnectedWeights.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_H
-#define ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_H
+#ifndef ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_H
+#define ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_H
#include "src/runtime/cpu/ICpuOperator.h"
@@ -34,8 +34,6 @@ namespace cpu
class CpuConvertFullyConnectedWeights : public ICpuOperator
{
public:
- /** Constructor */
- CpuConvertFullyConnectedWeights() = default;
/** Configure operator for a given list of arguments
*
* @param[in] src Source tensor to permute. Data types supported: All
@@ -44,12 +42,9 @@ public:
* @param[in] data_layout The data layout the weights have been trained in.
*/
void configure(const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuConvertFullyConnectedWeights
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor to permute. Data types supported: All
- * @param[in] dst Destination tensor. Data types supported: Same as @p dst
- * @param[in] original_src_shape Shape of the original src tensor (the one entering fully connected layer).
- * @param[in] data_layout The data layout the weights have been trained in.
+ * Similar to @ref CpuConvertFullyConnectedWeights::configure()
*
* @return a status
*/
@@ -59,4 +54,4 @@ public:
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_CONVERTFULLYCONNECTEDWEIGHTS_H */
+#endif /* ARM_COMPUTE_CPU_CONVERT_FULLYCONNECTED_WEIGHTS_H */
diff --git a/src/runtime/cpu/operators/CpuCopy.h b/src/runtime/cpu/operators/CpuCopy.h
index 057bb6efa5..861bbb7849 100644
--- a/src/runtime/cpu/operators/CpuCopy.h
+++ b/src/runtime/cpu/operators/CpuCopy.h
@@ -34,19 +34,15 @@ namespace cpu
class CpuCopy : public ICpuOperator
{
public:
- /** Constructor */
- CpuCopy() = default;
/** Configure operator for a given list of arguments
*
* @param[in] src Source tensor info. Data type supported: All
* @param[out] dst Destination info. Data type supported: Same as @p src
*/
void configure(const ITensorInfo *src, ITensorInfo *dst);
-
- /** Static function to check if given info will lead to a valid configuration of @ref CpuCopy
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data type supported: All
- * @param[in] dst Destination tensor info. Data type supported: Same as @p src
+ * Similar to @ref CpuCopy::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp b/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp
index f577e94def..8141487125 100644
--- a/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp
+++ b/src/runtime/cpu/operators/CpuDepthwiseConv2d.cpp
@@ -71,12 +71,6 @@ Status validate_arguments_optimized(const ITensorInfo *src, const ITensorInfo *w
}
} // namespace
-CpuDepthwiseConv2d::CpuDepthwiseConv2dOptimizedInternal::CpuDepthwiseConv2dOptimizedInternal()
- : _dwc_optimized_func(nullptr), _permute_input(nullptr), _permute_weights(nullptr), _permute_output(nullptr), _activationlayer_function(nullptr), _has_bias(false), _is_quantized(false),
- _is_nchw(true), _permute(false), _is_activationlayer_enabled(false), _is_prepared(false)
-{
-}
-
void CpuDepthwiseConv2d::CpuDepthwiseConv2dOptimizedInternal::configure(ITensorInfo *src,
const ITensorInfo *weights,
const ITensorInfo *biases,
@@ -264,12 +258,6 @@ void CpuDepthwiseConv2d::CpuDepthwiseConv2dOptimizedInternal::prepare(ITensorPac
}
}
-CpuDepthwiseConv2d::CpuDepthwiseConv2dGeneric::CpuDepthwiseConv2dGeneric()
- : _depthwise_conv_kernel(nullptr), _permute_input(nullptr), _permute_weights(nullptr), _permute_output(nullptr), _activationlayer_function(nullptr), _is_nchw(true), _is_prepared(false),
- _is_activationlayer_enabled(false)
-{
-}
-
void CpuDepthwiseConv2d::CpuDepthwiseConv2dGeneric::configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
@@ -432,11 +420,6 @@ void CpuDepthwiseConv2d::CpuDepthwiseConv2dGeneric::prepare(ITensorPack &tensors
}
}
-CpuDepthwiseConv2d::CpuDepthwiseConv2d()
- : _depth_conv_func(DepthwiseConvolutionFunction::GENERIC), _func_optimized(), _func_generic()
-{
-}
-
void CpuDepthwiseConv2d::configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info)
{
_depth_conv_func = get_depthwiseconvolution_function(src, weights, (biases != nullptr) ? biases : nullptr, dst, info);
diff --git a/src/runtime/cpu/operators/CpuDepthwiseConv2d.h b/src/runtime/cpu/operators/CpuDepthwiseConv2d.h
index ae9f894aab..dd4839b28a 100644
--- a/src/runtime/cpu/operators/CpuDepthwiseConv2d.h
+++ b/src/runtime/cpu/operators/CpuDepthwiseConv2d.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_DEPTHWISECONV2D_H
-#define ARM_COMPUTE_CPU_DEPTHWISECONV2D_H
+#ifndef ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_H
+#define ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_H
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/experimental/Types.h"
@@ -45,7 +45,7 @@ class CpuDepthwiseConv2d : public ICpuOperator
{
public:
/** Default constructor */
- CpuDepthwiseConv2d();
+ CpuDepthwiseConv2d() = default;
/** Initialize the function's source, destination, weights and convolution information.
*
* @param[in, out] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32
@@ -57,7 +57,6 @@ public:
* @param[in] info Depthwise convolution meta-data.
*/
void configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info);
-
/** Static function to check if given info will lead to a valid configuration
*
* Similar to CpuDepthwiseConv2d::configure()
@@ -65,7 +64,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ConvolutionInfo &info);
-
/** Static function to choose the best depthwise convolution function for @ref CpuDepthwiseConv2d
*
* @param[in] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32
@@ -100,7 +98,7 @@ private:
{
public:
/** Default constructor */
- CpuDepthwiseConv2dOptimizedInternal();
+ CpuDepthwiseConv2dOptimizedInternal() = default;
/** Prevent instances of this class from being copied (As this class contains pointers) */
CpuDepthwiseConv2dOptimizedInternal(const CpuDepthwiseConv2dOptimizedInternal &) = delete;
/** Default move constructor */
@@ -121,7 +119,6 @@ private:
* @param[in] info Depthwise convolution meta-data.
*/
void configure(ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info);
-
/** Static function to check if given info will lead to a valid configuration
*
* Similar to CpuDepthwiseConv2dOptimizedInternal::configure()
@@ -157,7 +154,7 @@ private:
{
public:
/** Default constructor */
- CpuDepthwiseConv2dGeneric();
+ CpuDepthwiseConv2dGeneric() = default;
/** Prevent instances of this class from being copied (As this class contains pointers) */
CpuDepthwiseConv2dGeneric(const CpuDepthwiseConv2dGeneric &) = delete;
/** Default move constructor */
@@ -203,10 +200,10 @@ private:
bool _is_activationlayer_enabled{ false };
};
- DepthwiseConvolutionFunction _depth_conv_func;
- CpuDepthwiseConv2dOptimizedInternal _func_optimized;
- CpuDepthwiseConv2dGeneric _func_generic;
+ DepthwiseConvolutionFunction _depth_conv_func{ DepthwiseConvolutionFunction::GENERIC };
+ CpuDepthwiseConv2dOptimizedInternal _func_optimized{};
+ CpuDepthwiseConv2dGeneric _func_generic{};
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_DEPTHWISECONV2D_H */
+#endif /* ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_H */
diff --git a/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h b/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h
index 70845163f4..f3d3b618c6 100644
--- a/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h
+++ b/src/runtime/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h
@@ -35,10 +35,8 @@ namespace cpu
class CpuDepthwiseConv2dAssemblyDispatch : public ICpuOperator
{
public:
- /** Default constructor */
CpuDepthwiseConv2dAssemblyDispatch();
ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDepthwiseConv2dAssemblyDispatch);
- /** Default destructor */
~CpuDepthwiseConv2dAssemblyDispatch();
/** Initialize the function's source, destination, kernels and border_size.
*
@@ -67,6 +65,7 @@ public:
* @return True if activation is supported else false
*/
static bool is_activation_supported(const ActivationLayerInfo &activation);
+
// Inherited methods overridden:
void run(ITensorPack &tensors) override;
void prepare(ITensorPack &tensors) override;
diff --git a/src/runtime/cpu/operators/CpuDequantize.h b/src/runtime/cpu/operators/CpuDequantize.h
index d1fb9e8d0e..fdbd6a57c2 100644
--- a/src/runtime/cpu/operators/CpuDequantize.h
+++ b/src/runtime/cpu/operators/CpuDequantize.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuDequantize : public ICpuOperator
{
public:
- /** Default Constructor */
- CpuDequantize() = default;
/** Configure the kernel.
*
* @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
diff --git a/src/runtime/cpu/operators/CpuDirectConv2d.h b/src/runtime/cpu/operators/CpuDirectConv2d.h
index 9e584b9c49..c17b076f85 100644
--- a/src/runtime/cpu/operators/CpuDirectConv2d.h
+++ b/src/runtime/cpu/operators/CpuDirectConv2d.h
@@ -55,9 +55,7 @@ namespace cpu
class CpuDirectConv2d : public ICpuOperator
{
public:
- /** Constructor */
CpuDirectConv2d(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
- /** Destructor */
~CpuDirectConv2d();
/** Set the input, weights, biases and output tensors.
*
diff --git a/src/runtime/cpu/operators/CpuElementwise.h b/src/runtime/cpu/operators/CpuElementwise.h
index 899a2ffdb7..ef5caf2825 100644
--- a/src/runtime/cpu/operators/CpuElementwise.h
+++ b/src/runtime/cpu/operators/CpuElementwise.h
@@ -52,13 +52,11 @@ public:
* @param[out] dst The output tensor information.
*/
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst);
- /** Static function to check if the given information will lead to a valid configuration
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src0 The first source tensor information.
- * @param[in] src1 The second source tensor information. With PRelu, this is used as alpha tensor.
- * @param[out] dst The output tensor information.
+ * Similar to @ref CpuElementwiseArithmetic::configure()
*
- * @return A status
+ * @return a status
*/
static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst);
};
@@ -85,11 +83,9 @@ public:
* @param[out] dst Output tensor info. Data types supported: Same as @p src0.
*/
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuArithmeticKernel for division
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src0 First tensor input info. Data types supported: S32/F16/F32.
- * @param[in] src1 Second tensor input info. Data types supported: Same as @p src0.
- * @param[in] dst Output tensor info. Data types supported: Same as @p src0.
+ * Similar to @ref CpuElementwiseDivision::configure()
*
* @return a status
*/
@@ -112,11 +108,9 @@ public:
* @param[out] dst Output tensor info. Data types supported: Same as @p src0.
*/
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuArithmeticKernel for power
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src0 First tensor input info. Data types supported: F16/F32.
- * @param[in] src1 Second tensor input info. Data types supported: Same as @p src0.
- * @param[in] dst Output tensor info. Data types supported: Same as @p src0.
+ * Similar to @ref CpuElementwisePower::configure()
*
* @return a status
*/
@@ -139,12 +133,9 @@ public:
* @param[in] op Comparison Operation to be performed.
*/
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ComparisonOperation op);
- /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuComparisonKernel
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src0 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in] src1 Second tensor input info. Data types supported: Same as @p src0.
- * @param[in] dst Output tensor info. Data types supported: U16/U32.
- * @param[in] op Comparison Operation to be performed.
+ * Similar to @ref CpuElementwiseComparison::configure()
*
* @return a status
*/
@@ -167,11 +158,9 @@ public:
* @param[out] dst Output tensor info. Data types supported: U16/U32.
*/
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref cpu::kernels::CpuComparisonKernel
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src0 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in] src1 Second tensor input info. Data types supported: Same as @p src0.
- * @param[in] dst Output tensor info. Data types supported: U16/U32.
+ * Similar to @ref CpuElementwiseComparisonStatic::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuElementwiseUnary.h b/src/runtime/cpu/operators/CpuElementwiseUnary.h
index 721ba2a85b..5ea29e07e9 100644
--- a/src/runtime/cpu/operators/CpuElementwiseUnary.h
+++ b/src/runtime/cpu/operators/CpuElementwiseUnary.h
@@ -43,9 +43,7 @@ public:
void configure(ElementWiseUnary op, const ITensorInfo &src, ITensorInfo &dst);
/** Static function to check if given info will lead to a valid configuration
*
- * @param[in] op Unary operation to execute
- * @param[in] src First tensor input info. Data types supported: F16/F32, F16/F32/S32 for NEG/ABS operations.
- * @param[in] dst Output tensor info. Data types supported: Same as @p input.
+ * Similar to @ref CpuElementwiseUnary::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuFill.h b/src/runtime/cpu/operators/CpuFill.h
index fac8e76481..b946467da6 100644
--- a/src/runtime/cpu/operators/CpuFill.h
+++ b/src/runtime/cpu/operators/CpuFill.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuFill : public ICpuOperator
{
public:
- /** Constructor */
- CpuFill() = default;
/** Configure operator for a given list of arguments
*
* @param[in,out] tensor Tensor to fill. Supported data types: All
diff --git a/src/runtime/cpu/operators/CpuFlatten.h b/src/runtime/cpu/operators/CpuFlatten.h
index ae71453988..3e24a93429 100644
--- a/src/runtime/cpu/operators/CpuFlatten.h
+++ b/src/runtime/cpu/operators/CpuFlatten.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuFlatten : public ICpuOperator
{
public:
- /** Constructor */
- CpuFlatten() = default;
/** Configure operator for a given list of arguments
*
* Valid data layouts:
diff --git a/src/runtime/cpu/operators/CpuFloor.h b/src/runtime/cpu/operators/CpuFloor.h
index cbb9d565eb..0cd0cc0b4e 100644
--- a/src/runtime/cpu/operators/CpuFloor.h
+++ b/src/runtime/cpu/operators/CpuFloor.h
@@ -34,18 +34,15 @@ namespace cpu
class CpuFloor : public ICpuOperator
{
public:
- /** Constructor */
- CpuFloor() = default;
/** Configure operator for a given list of arguments
*
* @param[in] src Source tensor info. Data types supported: F16/F32.
* @param[in] dst Destination tensor info. Data type supported: same as @p src
*/
void configure(const ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuFloor
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data types supported: F16/F32.
- * @param[in] dst Destination tensor info. Data type supported: same as @p src
+ * Similar to @ref CpuFloor::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuGemmDirectConv2d.h b/src/runtime/cpu/operators/CpuGemmDirectConv2d.h
index b572f36a3a..7fb20b3037 100644
--- a/src/runtime/cpu/operators/CpuGemmDirectConv2d.h
+++ b/src/runtime/cpu/operators/CpuGemmDirectConv2d.h
@@ -41,10 +41,8 @@ namespace cpu
class CpuGemmDirectConv2d : public ICpuOperator
{
public:
- /** Constructor */
CpuGemmDirectConv2d();
ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmDirectConv2d);
- /** Destructor */
~CpuGemmDirectConv2d();
/** Set the input and output tensors.
*
diff --git a/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h b/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h
index 848aaea3e7..bed88a60d5 100644
--- a/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h
+++ b/src/runtime/cpu/operators/CpuGemmLowpOutputStage.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OUTPUTSTAGE_H
-#define ARM_COMPUTE_CPU_GEMMLOWP_OUTPUTSTAGE_H
+#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OUTPUT_STAGE_H
+#define ARM_COMPUTE_CPU_GEMMLOWP_OUTPUT_STAGE_H
#include "arm_compute/core/Types.h"
#include "src/runtime/cpu/ICpuOperator.h"
@@ -51,10 +51,6 @@ namespace cpu
class CpuGemmLowpOutputStage : public ICpuOperator
{
public:
- /** Constructor */
- CpuGemmLowpOutputStage() = default;
- /** Default destructor */
- ~CpuGemmLowpOutputStage() = default;
/** Initialise the kernel's inputs, output
*
* Valid data layouts:
@@ -87,4 +83,4 @@ public:
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OUTPUTSTAGE_H */
+#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OUTPUT_STAGE_H */
diff --git a/src/runtime/cpu/operators/CpuMul.h b/src/runtime/cpu/operators/CpuMul.h
index 6e717188a4..da518c4461 100644
--- a/src/runtime/cpu/operators/CpuMul.h
+++ b/src/runtime/cpu/operators/CpuMul.h
@@ -35,8 +35,6 @@ namespace cpu
class CpuMul : public ICpuOperator
{
public:
- /** Default Constructor */
- CpuMul() = default;
/** Initialise the kernel's inputs, dst and convertion policy.
*
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
@@ -81,8 +79,6 @@ public:
class CpuComplexMul : public ICpuOperator
{
public:
- /** Default Constructor */
- CpuComplexMul() = default;
/** Initialise the kernel's inputs, dst.
*
* @param[in, out] src1 First input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
diff --git a/src/runtime/cpu/operators/CpuPermute.h b/src/runtime/cpu/operators/CpuPermute.h
index 2b30d7fbd8..2500017c0e 100644
--- a/src/runtime/cpu/operators/CpuPermute.h
+++ b/src/runtime/cpu/operators/CpuPermute.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuPermute : public ICpuOperator
{
public:
- /** Constructor */
- CpuPermute() = default;
/** Configure operator for a given list of arguments
*
* @note Arbitrary permutation vectors are supported with rank not greater than 4
@@ -45,13 +43,9 @@ public:
* @param[in] perm Permutation vector
*/
void configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuPermute
+ /** Static function to check if given info will lead to a valid configuration
*
- * @note Arbitrary permutation vectors are supported with rank not greater than 4
- *
- * @param[in] src Source tensor to permute. Data types supported: All
- * @param[in] dst Destination tensor. Data types supported: Same as @p dst
- * @param[in] perm Permutation vector
+ * Similar to @ref CpuPermute::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuPool2d.h b/src/runtime/cpu/operators/CpuPool2d.h
index 68416b5cfc..7feff91612 100644
--- a/src/runtime/cpu/operators/CpuPool2d.h
+++ b/src/runtime/cpu/operators/CpuPool2d.h
@@ -46,10 +46,8 @@ namespace cpu
class CpuPool2d : public ICpuOperator
{
public:
- /** Constructor */
CpuPool2d();
ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuPool2d);
- /** Default destructor */
~CpuPool2d();
/** Set the src and dst tensors.
*
diff --git a/src/runtime/cpu/operators/CpuQuantize.h b/src/runtime/cpu/operators/CpuQuantize.h
index 09afffd920..9a34a36bcc 100644
--- a/src/runtime/cpu/operators/CpuQuantize.h
+++ b/src/runtime/cpu/operators/CpuQuantize.h
@@ -34,8 +34,6 @@ namespace cpu
class CpuQuantize : public ICpuOperator
{
public:
- /** Default Constructor */
- CpuQuantize() = default;
/** Set the input and output tensors.
*
* @param[in] src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
diff --git a/src/runtime/cpu/operators/CpuReshape.h b/src/runtime/cpu/operators/CpuReshape.h
index e136043568..581b55e0ef 100644
--- a/src/runtime/cpu/operators/CpuReshape.h
+++ b/src/runtime/cpu/operators/CpuReshape.h
@@ -34,19 +34,15 @@ namespace cpu
class CpuReshape : public ICpuOperator
{
public:
- /** Constructor */
- CpuReshape() = default;
/** Configure operator for a given list of arguments
*
* @param[in] src Source tensor info. Data type supported: All
* @param[out] dst Destination info. Data type supported: Same as @p src
*/
void configure(const ITensorInfo *src, ITensorInfo *dst);
-
- /** Static function to check if given info will lead to a valid configuration of @ref CpuReshape
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data type supported: All
- * @param[in] dst Destination tensor info. Data type supported: Same as @p src
+ * Similar to @ref CpuReshape::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuScale.cpp b/src/runtime/cpu/operators/CpuScale.cpp
index 681a15e26c..475cb2d4e8 100644
--- a/src/runtime/cpu/operators/CpuScale.cpp
+++ b/src/runtime/cpu/operators/CpuScale.cpp
@@ -86,22 +86,18 @@ void precompute_dx_dy_offsets(ITensor *dx, ITensor *dy, ITensor *offsets, float
}
} // namespace
-CpuScale::CpuScale()
- : _scale_info(InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED), _data_layout(DataLayout::UNKNOWN), _is_prepared(false)
-{
-}
-
void CpuScale::configure(ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
ARM_COMPUTE_ERROR_THROW_ON(CpuScale::validate(src, dst, info));
- _scale_info = info;
+ _scale_info = info;
+ _is_prepared = false;
// Get data layout and width/height indices
- _data_layout = _scale_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : _scale_info.data_layout;
- const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
+ _data_layout = _scale_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : _scale_info.data_layout;
+ const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
// Compute the ratio between source width/height and destination width/height
const bool is_align_corners_used = _scale_info.align_corners && arm_compute::scale_utils::is_align_corners_allowed_sampling_policy(_scale_info.sampling_policy);
@@ -205,8 +201,8 @@ void CpuScale::prepare(ITensorPack &tensors)
auto offsets = tensors.get_tensor(TensorType::ACL_INT_2);
// Get data layout and width/height indices
- const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
// Compute the ratio between source width/height and destination width/height
const bool is_align_corners_used = _scale_info.align_corners && arm_compute::scale_utils::is_align_corners_allowed_sampling_policy(_scale_info.sampling_policy);
diff --git a/src/runtime/cpu/operators/CpuScale.h b/src/runtime/cpu/operators/CpuScale.h
index 90248a8d59..b83e04bc42 100644
--- a/src/runtime/cpu/operators/CpuScale.h
+++ b/src/runtime/cpu/operators/CpuScale.h
@@ -40,8 +40,6 @@ namespace cpu
class CpuScale : public ICpuOperator
{
public:
- /** Default Constructor */
- CpuScale();
/** Initialize the function's source, destination, interpolation type and border_mode.
*
* @param[in, out] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/U8/S16/F16/F32. (Written to only for @p border_mode != UNDEFINED)
@@ -49,11 +47,9 @@ public:
* @param[in] info @ref ScaleKernelInfo to be used for configuration
*/
void configure(ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info);
- /** Static function to check if given info will lead to a valid configuration of @ref NEScale
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/U8/S16/F16/F32. (Written to only for @p border_mode != UNDEFINED)
- * @param[in] dst Destination tensor info. Data type supported: Same as @p src. All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
- * @param[in] info @ref ScaleKernelInfo to be used for validation
+ * Similar to @ref CpuScale::configure()
*
* @return a status
*/
@@ -64,10 +60,10 @@ public:
void run(ITensorPack &tensors) override;
private:
- ScaleKernelInfo _scale_info;
- DataLayout _data_layout;
- bool _is_prepared;
+ ScaleKernelInfo _scale_info{ InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED };
+ DataLayout _data_layout{ DataLayout::UNKNOWN };
+ bool _is_prepared{ false };
};
} // namespace cpu
} // namespace arm_compute
-#endif /*ARM_COMPUTE_CPU_SCALE_H */
+#endif /* ARM_COMPUTE_CPU_SCALE_H */
diff --git a/src/runtime/cpu/operators/CpuSoftmax.h b/src/runtime/cpu/operators/CpuSoftmax.h
index 38817977b3..a9ac803c09 100644
--- a/src/runtime/cpu/operators/CpuSoftmax.h
+++ b/src/runtime/cpu/operators/CpuSoftmax.h
@@ -57,7 +57,6 @@ template <bool IS_LOG = false>
class CpuSoftmaxGeneric : public ICpuOperator
{
public:
- /** Constructor */
CpuSoftmaxGeneric();
/** Set the input and output tensors.
*
@@ -69,14 +68,9 @@ public:
* axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
*/
void configure(const ITensorInfo *src, ITensorInfo *dst, float beta = 1.0f, int32_t axis = 0);
-
- /** Static function to check if given info will lead to a valid configuration of @ref CpuSoftmax
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] dst Destination tensor info. Data types supported: same as @p input
- * @param[in] beta (Optional) A scaling factor for the exponent.
- * @param[in] axis (Optional) The dimension in which to apply the function. E.g. for input of shape 4x5x6 and
- * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
+ * Similar to @ref CpuSoftmaxGeneric::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuSub.h b/src/runtime/cpu/operators/CpuSub.h
index 099ffef87e..aad01fe4dc 100644
--- a/src/runtime/cpu/operators/CpuSub.h
+++ b/src/runtime/cpu/operators/CpuSub.h
@@ -56,26 +56,9 @@ public:
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
*/
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration of @ref CpuSub
+ /** Static function to check if given info will lead to a valid configuration
*
- * Valid configurations (src0,src1) -> dst :
- *
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (QASYMM8, QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED, QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (S16,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- *
- * @param[in] src0 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32
- * @param[in] src1 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32
- * @param[in] dst Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32
- * @param[in] policy Policy to use to handle overflow. Convert policy cannot be WRAP if datatype is quantized.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * Similar to @ref CpuSub::configure()
*
* @return a status
*/
diff --git a/src/runtime/cpu/operators/CpuTranspose.h b/src/runtime/cpu/operators/CpuTranspose.h
index c0232ddab2..0735924839 100644
--- a/src/runtime/cpu/operators/CpuTranspose.h
+++ b/src/runtime/cpu/operators/CpuTranspose.h
@@ -34,18 +34,15 @@ namespace cpu
class CpuTranspose : public ICpuOperator
{
public:
- /** Constructor */
- CpuTranspose() = default;
/** Configure operator for a given list of arguments
*
* @param[in] src Source tensor to permute. Data types supported: All
* @param[out] dst Destintation tensor. Data types supported: Same as @p src
*/
void configure(const ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuTranspose
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor to permute. Data types supported: All
- * @param[in] dst Destination tensor. Data types supported: Same as @p dst
+ * Similar to @ref CpuTranspose::configure()
*
* @return a status
*/