author    Michalis Spyrou <michalis.spyrou@arm.com>    2017-11-30 14:25:57 +0000
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:42:17 +0000
commit    afa5d817b1d083837cd7ea30d32f845d82620c12 (patch)
tree      1ca2a27ab7108b7137b96fc1547a8b5ac5d9c8f7 /arm_compute
parent    631c41a4e3645a948b0f597caa77e8fa91ca0efc (diff)
download  ComputeLibrary-afa5d817b1d083837cd7ea30d32f845d82620c12.tar.gz
COMPMID-617 Add validation methods to Kernels
- NEActivationLayer
- NESoftmax
- NEDirectConvolutionLayer
- NENormalizationLayer
- NEPoolingLayer

Change-Id: Ib279f1c1b7f9247679b0d6593aed7393da8fe87b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/111335
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--arm_compute/core/NEON/kernels/NEActivationLayerKernel.h10
-rw-r--r--arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h10
-rw-r--r--arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h14
-rw-r--r--arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h12
-rw-r--r--arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h10
-rw-r--r--arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h13
-rw-r--r--arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h28
-rw-r--r--arm_compute/runtime/NEON/functions/NEActivationLayer.h10
-rw-r--r--arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h20
-rw-r--r--arm_compute/runtime/NEON/functions/NENormalizationLayer.h12
-rw-r--r--arm_compute/runtime/NEON/functions/NEPoolingLayer.h11
-rw-r--r--arm_compute/runtime/NEON/functions/NESoftmaxLayer.h9
12 files changed, 154 insertions, 5 deletions
diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
index ef51cbe841..e8c032aaeb 100644
--- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
@@ -59,6 +59,16 @@ public:
* @param[in] activation_info Activation layer information.
*/
void configure(ITensor *input, ITensor *output, ActivationLayerInfo activation_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEActivationLayerKernel
+ *
+ * @param[in] input Source tensor info. If @p output is nullptr, the activation is validated as running in place
+ * on this tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] act_info Activation layer information.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
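The pattern this enables: build ITensorInfo descriptors, call validate(), and only construct and configure the kernel once the Status is clean. A minimal caller-side sketch; the shapes are illustrative, and the Status accessors used (explicit bool conversion, error_description()) are the ones introduced alongside these methods:

    #include "arm_compute/core/NEON/kernels/NEActivationLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include <cstdio>

    using namespace arm_compute;

    int main()
    {
        // Describe the tensors without allocating any backing memory.
        TensorInfo src(TensorShape(128U, 128U, 16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(128U, 128U, 16U), 1, DataType::F32);

        // Ask the kernel whether this configuration would be accepted.
        const Status s = NEActivationLayerKernel::validate(
            &src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        if(!bool(s))
        {
            std::printf("%s\n", s.error_description().c_str()); // reason for rejection
        }
        return bool(s) ? 0 : 1;
    }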
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h
index 14c8e9c7e0..05ade1c5dd 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h
@@ -57,6 +57,16 @@ public:
* Data type supported: Same as @p input
*/
void configure(ITensor *input, const ITensor *bias, ITensor *output = nullptr);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerBiasAccumulateKernel
+ *
+ * @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+ * Data type supported: QS8/QS16/F16/F32
+ * @param[in] bias The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
+ * @param[in] output (Optional) If the output tensor is specified, the accumulation is done out of place. (Defaults to nullptr)
* Data type supported: Same as @p input
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output = nullptr);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
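Leaving @p output at its nullptr default validates the in-place form, mirroring configure(). A sketch under the same assumptions as above (illustrative shapes; the check_* wrapper is just for illustration):

    #include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_bias_accumulate()
    {
        TensorInfo acc(TensorShape(64U, 64U, 32U), 1, DataType::F32);
        TensorInfo bias(TensorShape(32U), 1, DataType::F32); // 1D tensor, one value per OFM

        // No output info passed: this validates the in-place accumulation path.
        return NEDirectConvolutionLayerBiasAccumulateKernel::validate(&acc, &bias);
    }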
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
index 370ddca480..4529120f02 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
@@ -62,6 +62,20 @@ public:
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
*/
void configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerKernel
+ *
+ * @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+ * The 3rd dimension must be the same as the input's volume 3rd dimension.
* Data type supported: Same as @p input.
+ * @param[in] output Output tensor.
* The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
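Because only ITensorInfo is consulted, kernel-size and stride combinations can be probed before any allocation. A sketch using the documented [kernel_x, kernel_y, IFM, OFM] weight layout (shapes illustrative):

    #include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_direct_conv_kernel()
    {
        TensorInfo src(TensorShape(32U, 32U, 8U), 1, DataType::F32);
        TensorInfo weights(TensorShape(3U, 3U, 8U, 16U), 1, DataType::F32); // 3x3, IFM 8, OFM 16
        TensorInfo dst(TensorShape(32U, 32U, 16U), 1, DataType::F32);       // OFM becomes the 3rd dimension

        // Stride 1 with pad 1 preserves the 32x32 spatial size for a 3x3 kernel.
        return NEDirectConvolutionLayerKernel::validate(&src, &weights, &dst, PadStrideInfo(1, 1, 1, 1));
    }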
diff --git a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
index 40fae3520b..405daf1068 100644
--- a/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h
@@ -57,6 +57,18 @@ public:
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
void configure(const ITensor *input, const ITensor *input_squared, ITensor *output, NormalizationLayerInfo norm_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayerKernel
+ *
+ * @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
* and an optional 4th dimension for batch of inputs. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] input_squared Source tensor with each element squared. 3 lower dims represent a single input with dimensions [width, height, IFM].
+ * Data type supported: same as @p input
+ * @param[in] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *input_squared, const ITensorInfo *output, NormalizationLayerInfo norm_info);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
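At kernel level the squared input is the caller's responsibility (the runtime function further down computes it internally), so its metadata must match the source. A sketch, shapes illustrative:

    #include "arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_norm_kernel()
    {
        TensorInfo in(TensorShape(16U, 16U, 8U), 1, DataType::F32);
        TensorInfo in_squared(in); // same shape and data type as the input
        TensorInfo out(in);

        // Cross-map normalization over a window of 5 feature maps.
        return NENormalizationLayerKernel::validate(&in, &in_squared, &out,
                                                    NormalizationLayerInfo(NormType::CROSS_MAP, 5));
    }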
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index 05eb8d6ddc..10f990e7ef 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -64,9 +64,13 @@ public:
void configure(const ITensor *input1, const ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplicationKernel
*
- * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[in] input2 An input tensor info. Data types supported: U8, QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
- * @param[in] output The output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16 (only if @p input1 is F16), F32 (only if both inputs are F32).
+ * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
+ * For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
+ * For QS8/QS16 scale = 1 is the only supported value.
+ *
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8, QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
+ * @param[in] output The output tensor. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16 (only if @p input1 is F16), F32 (only if both inputs are F32).
* @param[in] scale Scale to apply after multiplication.
* Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy.
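The scale/rounding coupling in the note is exactly the kind of constraint validate() can surface early. A sketch; the enum values come from the library's existing ConvertPolicy and RoundingPolicy types, and the shapes are illustrative:

    #include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_pixel_wise_mul()
    {
        TensorInfo a(TensorShape(64U, 64U), 1, DataType::U8);
        TensorInfo b(a);
        TensorInfo out(a);

        // scale = 1/255 pairs with the round-to-nearest path described in the note;
        // every other legal scale is 1/2^n (n in [0, 15]) and pairs with round-to-zero.
        return NEPixelWiseMultiplicationKernel::validate(&a, &b, &out, 1.f / 255,
                                                         ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP);
    }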
diff --git a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
index 0a57a26f17..87d14e5f91 100644
--- a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
@@ -55,6 +55,17 @@ public:
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
void configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEPoolingLayerKernel
+ *
+ * @note QS8, QS16 and F16 are supported for pool sizes 2 and 3 only
+ *
+ * @param[in] input Source tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor. Data types supported: Same as @p input.
+ * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
@@ -144,7 +155,7 @@ private:
const ITensor *_input;
ITensor *_output;
PoolingLayerInfo _pool_info;
- int _num_elems_processed_per_iteration;
+ unsigned int _num_elems_processed_per_iteration;
BorderSize _border_size;
};
} // namespace arm_compute
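The pool-size restriction in the note becomes a recoverable Status instead of a runtime assert. A sketch probing an F16 configuration with pool size 2 (shapes illustrative; F16 support also depends on how the library was built):

    #include "arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_pooling_kernel()
    {
        TensorInfo src(TensorShape(32U, 32U, 4U), 1, DataType::F16);
        TensorInfo dst(TensorShape(16U, 16U, 4U), 1, DataType::F16); // 32 / 2 = 16 per dimension

        // Pool size 2 is inside the note's F16 support matrix; size 5 would be rejected.
        return NEPoolingLayerKernel::validate(&src, &dst,
                                              PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
    }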
diff --git a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
index c3e25181b6..0fecfac15a 100644
--- a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
@@ -43,6 +43,14 @@ public:
* @param[out] output Destination tensor. Data types supported: same as @p input
*/
void configure(const ITensor *input, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NELogits1DMaxKernel
+ *
+ * @param[in] input Source tensor. Data types supported: QS8/QS16/F16/F32
+ * @param[in] output Destination tensor. Data types supported: same as @p input
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
@@ -81,6 +89,17 @@ public:
* @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
*/
void configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum, float beta = 1.0f);
+ /** Static function to check if given info will lead to a valid configuration of @ref NELogits1DShiftExpSumKernel
+ *
+ * @param[in] input Source tensor. Data types supported: QS8/QS16/F16/F32
+ * @param[in] max Max values tensor. Data types supported: same as @p input
+ * @param[in] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p input.
+ * @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum, float beta = 1.0f);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
@@ -120,6 +139,15 @@ public:
* @param[out] output Destination tensor. Data types supported: same as @p input.
*/
void configure(const ITensor *input, const ITensor *sum, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NELogits1DNormKernel
+ *
+ * @param[in] input Source tensor. Data types supported: QS8/QS16/S32/F16/F32
+ * @param[in] sum Sum tensor. The number of dimensions should be dim(input)-1. Data types supported: same as @p input.
+ * @param[in] output Destination tensor. Data types supported: same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
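The three kernels above are the stages of softmax, and their checks chain in the same order the function runs them. A sketch for row-wise softmax; all shapes are illustrative, with the max and sum tensors holding one value per row:

    #include "arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_softmax_kernels()
    {
        TensorInfo in(TensorShape(256U, 32U), 1, DataType::F32); // 32 rows of 256 logits
        TensorInfo max(TensorShape(1U, 32U), 1, DataType::F32);
        TensorInfo tmp(in);                                      // shifted exponentials
        TensorInfo sum(TensorShape(1U, 32U), 1, DataType::F32);
        TensorInfo out(in);

        Status s = NELogits1DMaxKernel::validate(&in, &max);
        if(bool(s))
        {
            s = NELogits1DShiftExpSumKernel::validate(&in, &max, &tmp, &sum);
        }
        if(bool(s))
        {
            s = NELogits1DNormKernel::validate(&tmp, &sum, &out);
        }
        return s;
    }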
diff --git a/arm_compute/runtime/NEON/functions/NEActivationLayer.h b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
index f3cd305910..007c53a0a8 100644
--- a/arm_compute/runtime/NEON/functions/NEActivationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
@@ -49,6 +49,16 @@ public:
* @param[in] activation_info Activation layer parameters.
*/
void configure(ITensor *input, ITensor *output, ActivationLayerInfo activation_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEActivationLayer
+ *
+ * @param[in] input Source tensor info. If @p output is nullptr, the activation is validated as running in place
+ * on this tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] act_info Activation layer information.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
};
}
#endif /* __ARM_COMPUTE_NEACTIVATIONLAYER_H__ */
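The function-level overload lets framework code validate a layer without naming the backing kernel. A sketch; ARM_COMPUTE_ERROR_THROW_ON is the library's existing Status-checking macro, and the shapes are illustrative:

    #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Error.h"

    using namespace arm_compute;

    void check_activation_layer()
    {
        TensorInfo src(TensorShape(224U, 224U, 3U), 1, DataType::F32);
        TensorInfo dst(src);

        // Raises an error at the call site if the configuration is rejected.
        ARM_COMPUTE_ERROR_THROW_ON(NEActivationLayer::validate(
            &src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH)));
    }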
diff --git a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
index c731bf278f..6d8ce1f6f4 100644
--- a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
@@ -68,6 +68,26 @@ public:
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
*/
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayer
+ *
+ * @note DirectConvolution only works in the following configurations:
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
+ * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
+ *
+ * @param[in] input Input tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] weights Set of kernels to convolve the input volume.
+ * Supported sizes: 1x1, 3x3 and 5x5.
+ * The 3rd dimension must be the same as the input's volume 3rd dimension.
+ * Data type supported: Same as @p input.
+ * @param[in] bias Set of biases. Data type supported: Same as @p input.
+ * @param[in] output Output tensor.
* The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info);
// Inherited methods overridden:
void run() override;
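The note's support matrix is now machine-checkable: 5x5 is listed for F32 only, so the same shapes with QS8 weights would be expected to fail validation. A sketch of the supported case (shapes illustrative):

    #include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_direct_conv_layer()
    {
        TensorInfo src(TensorShape(32U, 32U, 8U), 1, DataType::F32);
        TensorInfo weights(TensorShape(5U, 5U, 8U, 16U), 1, DataType::F32);
        TensorInfo bias(TensorShape(16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(28U, 28U, 16U), 1, DataType::F32); // 32 - 5 + 1 = 28 with no padding

        return NEDirectConvolutionLayer::validate(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0));
    }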
diff --git a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
index 0d5656d602..4b5ad28706 100644
--- a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
@@ -55,11 +55,21 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data type supported: QS8/F16/F32
+ * and an optional 4th dimension for batch of inputs. Data type supported: QS8/QS16/F16/F32
* @param[out] output Destination with the same dimensions, data type and number of channels of @p input
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
void configure(const ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayer
+ *
+ * @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
+ * and an optional 4th dimension for batch of inputs. Data type supported: QS8/QS16/F16/F32
+ * @param[in] output Destination with the same dimensions, data type and number of channels of @p input
+ * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const NormalizationLayerInfo &norm_info);
// Inherited methods overridden:
void run() override;
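Note the asymmetry with the kernel-level check: no input_squared info appears here, because the function owns that intermediate and validates it internally. A sketch with a batched input (shapes illustrative):

    #include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_norm_layer()
    {
        TensorInfo src(TensorShape(16U, 16U, 8U, 4U), 1, DataType::F32); // batch of 4
        TensorInfo dst(src);

        return NENormalizationLayer::validate(&src, &dst, NormalizationLayerInfo(NormType::IN_MAP_1D, 5));
    }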
diff --git a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
index 7b038aaa51..0f8abb587d 100644
--- a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
@@ -53,6 +53,17 @@ public:
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
void configure(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEPoolingLayer
+ *
+ * @note QS8, QS16 and F16 are supported for pool sizes 2 and 3 only
+ *
+ * @param[in] input Source tensor (written to only when padding != 0). Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor. Data types supported: Same as @p input.
+ * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
// Inherited methods overridden:
void run() override;
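A caller can also gate on the result rather than assert, for example to pick a fallback path. A sketch (shapes illustrative; the output dimensions follow the usual pooling arithmetic):

    #include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    bool pooling_supported()
    {
        TensorInfo src(TensorShape(30U, 30U, 16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(15U, 15U, 16U), 1, DataType::F32); // (30 + 2*1 - 3)/2 + 1 = 15

        return bool(NEPoolingLayer::validate(&src, &dst,
                    PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(2, 2, 1, 1))));
    }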
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index 38a0f2116f..5043f79c23 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -56,6 +56,15 @@ public:
* @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
*/
void configure(ITensor *input, ITensor *output, float beta = 1.0f);
+ /** Static function to check if given info will lead to a valid configuration of @ref NESoftmaxLayer
+ *
+ * @param[in] input Source tensor. Data types supported: QS8/QS16/F16/F32
+ * @param[in] output Destination tensor. Data types supported: same as @p input
+ * @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f);
// Inherited methods overridden:
void run() override;
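beta keeps its 1.0f default here as well; per the parameter doc, fixed-point inputs only accept beta = 1, so a temperature-scaled softmax as sketched below is a floating-point-only configuration (shapes illustrative):

    #include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_softmax_layer()
    {
        TensorInfo src(TensorShape(1000U), 1, DataType::F32); // e.g. one row of classifier logits
        TensorInfo dst(src);

        return NESoftmaxLayer::validate(&src, &dst, 0.5f); // beta = 0.5: valid for F32, not QS8/QS16
    }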