-rw-r--r--  arm_compute/core/CL/kernels/CLActivationLayerKernel.h | 10
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h | 10
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h | 10
-rw-r--r--  arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h | 18
-rw-r--r--  arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h | 14
-rw-r--r--  arm_compute/core/CL/kernels/CLPoolingLayerKernel.h | 6
-rw-r--r--  arm_compute/core/Error.h | 4
-rw-r--r--  arm_compute/core/Validate.h | 44
-rw-r--r--  arm_compute/runtime/CL/functions/CLActivationLayer.h | 10
-rw-r--r--  arm_compute/runtime/CL/functions/CLArithmeticAddition.h | 10
-rw-r--r--  arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h | 10
-rw-r--r--  arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h | 20
-rw-r--r--  arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h | 23
-rw-r--r--  arm_compute/runtime/CL/functions/CLPoolingLayer.h | 6
-rw-r--r--  src/core/CL/kernels/CLActivationLayerKernel.cpp | 28
-rw-r--r--  src/core/CL/kernels/CLArithmeticAdditionKernel.cpp | 33
-rw-r--r--  src/core/CL/kernels/CLArithmeticSubtractionKernel.cpp | 33
-rw-r--r--  src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp | 38
-rw-r--r--  src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp | 52
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp | 8
-rw-r--r--  src/runtime/CL/functions/CLActivationLayer.cpp | 5
-rw-r--r--  src/runtime/CL/functions/CLArithmeticAddition.cpp | 5
-rw-r--r--  src/runtime/CL/functions/CLArithmeticSubtraction.cpp | 5
-rw-r--r--  src/runtime/CL/functions/CLBatchNormalizationLayer.cpp | 8
-rw-r--r--  src/runtime/CL/functions/CLPixelWiseMultiplication.cpp | 6
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp | 35
-rw-r--r--  tests/validation/CL/ArithmeticAddition.cpp | 32
-rw-r--r--  tests/validation/CL/ArithmeticSubtraction.cpp | 32
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp | 37
-rw-r--r--  tests/validation/CL/PixelWiseMultiplication.cpp | 40
30 files changed, 496 insertions(+), 96 deletions(-)
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index dab133f05a..30bf6fb0d6 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -56,6 +56,16 @@ public:
* @param[in] act_info Activation layer information.
*/
void configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayerKernel
+ *
+ * @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
+ * of the activation function. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] act_info Activation layer information.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
index 0895fe3f79..aa4b9d6c5f 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
@@ -59,6 +59,16 @@ public:
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAdditionKernel
+ *
+ * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
index d7755d5e31..13136d93e0 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
@@ -61,6 +61,16 @@ public:
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtractionKernel
+ *
+ * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 26825efba9..a24432145a 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -63,6 +63,24 @@ public:
* @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
*/
void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayerKernel
+ *
+ * @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
+ * 3 lower dimensions represent a single input with dimensions [width, height, FM].
+ * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] beta Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] epsilon Small value to avoid division with zero.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index 309a202df3..ed876df06f 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -59,6 +59,20 @@ public:
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel
+ *
+ * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[in] scale Scale to apply after multiplication.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
+ * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
index 29190e2dd1..a9159a4bb8 100644
--- a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
@@ -60,11 +60,11 @@ public:
void configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayerKernel
*
- * @param[in] input Input's tensor info
- * @param[in] output Output's tensor info
+ * @param[in] input Source tensor info. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
- * @return an error
+ * @return an error status
*/
static Error validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
diff --git a/arm_compute/core/Error.h b/arm_compute/core/Error.h
index 6e4aa6a4f8..fa3f9c0615 100644
--- a/arm_compute/core/Error.h
+++ b/arm_compute/core/Error.h
@@ -240,7 +240,7 @@ Error create_error(ErrorCode error_code, const char *function, const char *file,
*
* @param[in] error Error value to check.
*/
-#define ARM_COMPUTE_ERROR_THROW(error) \
+#define ARM_COMPUTE_ERROR_THROW_ON(error) \
error.throw_if_error();
/** If the condition is true, the given message is printed and an exception is thrown
@@ -282,7 +282,7 @@ Error create_error(ErrorCode error_code, const char *function, const char *file,
*/
#define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) (cond) ? throw std::logic_error(msg) : val;
#else /* ARM_COMPUTE_ASSERTS_ENABLED */
-#define ARM_COMPUTE_ERROR_THROW(error)
+#define ARM_COMPUTE_ERROR_THROW_ON(error)
#define ARM_COMPUTE_ERROR_ON_MSG(cond, ...)
#define ARM_COMPUTE_ERROR_ON_LOC_MSG(cond, func, file, line, ...)
#define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) val
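The rename above pairs the throwing macro with the existing RETURN_* family: a validate() helper accumulates checks that return an Error, and configure() turns that Error into an exception. A minimal sketch of the pattern, assuming a hypothetical helper (validate_my_kernel and its arguments are illustrative only, not part of this patch):

    Error validate_my_kernel(const ITensorInfo *input, const ITensorInfo *output)
    {
        // Each RETURN_* macro returns a populated Error from this function on failure.
        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        return Error{};
    }

    void configure_my_kernel(const ITensorInfo *input, const ITensorInfo *output)
    {
        // The renamed macro throws (when assertions are enabled) instead of returning.
        ARM_COMPUTE_ERROR_THROW_ON(validate_my_kernel(input, output));
    }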
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index 1e2d262683..2ca9f6b64e 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -142,7 +142,7 @@ inline arm_compute::Error error_on_nullptr(const char *function, const char *fil
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -164,7 +164,7 @@ inline arm_compute::Error error_on_nullptr(const char *function, const char *fil
arm_compute::Error error_on_mismatching_windows(const char *function, const char *file, const int line,
const Window &full, const Window &win);
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
@@ -186,7 +186,7 @@ arm_compute::Error error_on_mismatching_windows(const char *function, const char
arm_compute::Error error_on_invalid_subwindow(const char *function, const char *file, const int line,
const Window &full, const Window &sub);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBWINDOW(f, s) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
@@ -206,7 +206,7 @@ arm_compute::Error error_on_invalid_subwindow(const char *function, const char *
arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *function, const char *file, const int line,
const Window &full, const Window &window, const int dim);
#define ARM_COMPUTE_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
#define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
@@ -225,7 +225,7 @@ arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *func
arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, const char *file, const int line,
const Coordinates &pos, unsigned int max_dim);
#define ARM_COMPUTE_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
#define ARM_COMPUTE_RETURN_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
@@ -244,7 +244,7 @@ arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, con
arm_compute::Error error_on_window_dimensions_gte(const char *function, const char *file, const int line,
const Window &win, unsigned int max_dim);
#define ARM_COMPUTE_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
#define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
@@ -267,7 +267,7 @@ arm_compute::Error error_on_mismatching_dimensions(const char *function, const c
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -357,7 +357,7 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -408,7 +408,7 @@ inline arm_compute::Error error_on_mismatching_data_types(const char *function,
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -432,7 +432,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
DataType &&first_data_type = tensor_info_1->data_type();
const int first_fixed_point_position = tensor_info_1->fixed_point_position();
- if((first_data_type != DataType::QS8) && (first_data_type != DataType::QS16))
+ if(!is_data_type_fixed_point(first_data_type))
{
return arm_compute::Error{};
}
@@ -473,7 +473,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -556,7 +556,7 @@ inline arm_compute::Error error_on_data_type_not_in(const char *function, const
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
@@ -602,7 +602,7 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c, ...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c, ...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
@@ -618,7 +618,7 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
arm_compute::Error error_on_tensor_not_2d(const char *function, const char *file, const int line,
const ITensor *tensor);
#define ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(t) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
#define ARM_COMPUTE_RETURN_ERROR_ON_TENSOR_NOT_2D(t) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
@@ -648,7 +648,7 @@ inline arm_compute::Error error_on_channel_not_in(const char *function, const ch
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN(c, ...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_CHANNEL_NOT_IN(c, ...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
@@ -665,7 +665,7 @@ inline arm_compute::Error error_on_channel_not_in(const char *function, const ch
arm_compute::Error error_on_channel_not_in_known_format(const char *function, const char *file, const int line,
Format fmt, Channel cn);
#define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
#define ARM_COMPUTE_RETURN_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
@@ -687,7 +687,7 @@ arm_compute::Error error_on_channel_not_in_known_format(const char *function, co
arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *file, const int line,
const IMultiHOG *multi_hog);
#define ARM_COMPUTE_ERROR_ON_INVALID_MULTI_HOG(m) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_MULTI_HOG(m) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
@@ -701,7 +701,7 @@ arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *
arm_compute::Error error_on_unconfigured_kernel(const char *function, const char *file, const int line,
const IKernel *kernel);
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
#define ARM_COMPUTE_RETURN_ERROR_ON_UNCONFIGURED_KERNEL(k) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
@@ -719,7 +719,7 @@ arm_compute::Error error_on_unconfigured_kernel(const char *function, const char
arm_compute::Error error_on_invalid_subtensor(const char *function, const char *file, const int line,
const TensorShape &parent_shape, const Coordinates &coords, const TensorShape &shape);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
@@ -736,7 +736,7 @@ arm_compute::Error error_on_invalid_subtensor(const char *function, const char *
arm_compute::Error error_on_invalid_subtensor_valid_region(const char *function, const char *file, const int line,
const ValidRegion &parent_valid_region, const ValidRegion &valid_region);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
@@ -783,7 +783,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -827,7 +827,7 @@ inline arm_compute::Error error_on_value_not_representable_in_fixed_point(const
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
}
diff --git a/arm_compute/runtime/CL/functions/CLActivationLayer.h b/arm_compute/runtime/CL/functions/CLActivationLayer.h
index a1aeb193d1..037ae71e7f 100644
--- a/arm_compute/runtime/CL/functions/CLActivationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLActivationLayer.h
@@ -49,6 +49,16 @@ public:
* @param[in] act_info Activation layer parameters.
*/
void configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayer
+ *
+ * @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
+ * of the activation function. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] act_info Activation layer information.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
};
}
#endif /* __ARM_COMPUTE_CLACTIVATIONLAYER_H__ */
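For illustration, a caller can now check a proposed activation configuration before allocating or configuring anything. A minimal sketch, with arbitrary shapes and using only types already visible in this patch:

    // Describe the tensors without backing them with memory.
    TensorInfo src_info(TensorShape(27U, 13U, 2U), 1, DataType::F32);
    TensorInfo dst_info(TensorShape(27U, 13U, 2U), 1, DataType::F32);
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);

    // Throws (with assertions enabled) if this combination would be rejected by configure().
    ARM_COMPUTE_ERROR_THROW_ON(CLActivationLayer::validate(&src_info, &dst_info, act_info));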
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
index f888256b33..0c54147e6c 100644
--- a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
+++ b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
@@ -47,6 +47,16 @@ public:
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAddition
+ *
+ * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
};
}
#endif /* __ARM_COMPUTE_CLARITHMETICADDITION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h b/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
index eedeaa8d0e..749f1b26b3 100644
--- a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
+++ b/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
@@ -48,6 +48,16 @@ public:
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtraction
+ *
+ * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
};
}
#endif /* __ARM_COMPUTE_CLARITHMETICSUBTRACTION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
index ffb66bee60..70a201a1f8 100644
--- a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
@@ -51,14 +51,32 @@ public:
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
* 3 lower dimensions represent a single input with dimensions [width, height, FM].
* The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
* @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] gamma Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] beta Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] epsilon Small value to avoid division with zero.
- * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
*/
void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayer
+ *
+ * @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
+ * 3 lower dimensions represent a single input with dimensions [width, height, FM].
+ * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] beta Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] epsilon Small value to avoid division with zero.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon);
// Inherited methods overridden:
void run() override;
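As a sketch of the new check (shapes and epsilon below are arbitrary): the per-channel parameter tensors must hold one value per feature map, which the kernel's validate() compares against dimension 2 of the input.

    TensorInfo input_info(TensorShape(16U, 16U, 8U), 1, DataType::F32); // [width, height, FM]
    TensorInfo param_info(TensorShape(8U), 1, DataType::F32);           // one value per feature map
    // A nullptr output info requests the in-place case handled by validate().
    ARM_COMPUTE_ERROR_THROW_ON(CLBatchNormalizationLayer::validate(&input_info, nullptr,
                                                                   &param_info, &param_info,
                                                                   &param_info, &param_info,
                                                                   0.001f));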
diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
index 71754fc3f4..4fdcd750bd 100644
--- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
+++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
@@ -37,15 +37,30 @@ class CLPixelWiseMultiplication : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8, S16, F16 or F32.
- * @param[in] input2 Second tensor input. Data types supported: U8, S16, F16 or F32.
- * @param[out] output Output tensor. Data types supported: U8(Only if both inputs are U8), S16, F16 or F32.
- * @param[in] scale Scale to apply after multiplication. Must be positive.
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 An input tensor. Data types supported: same as @p input1.
+ * @param[out] output The output tensor. Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[in] scale Scale to apply after multiplication.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplication
+ *
+ * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[in] scale Scale to apply after multiplication.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
+ * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
};
}
#endif /*__ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H__ */
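A minimal usage sketch with arbitrary shapes, assuming the ConvertPolicy::WRAP and RoundingPolicy::TO_ZERO enumerators behind the documented "Wrap" and "to zero" options; 1/8 satisfies the documented 1/2^n scale constraint:

    TensorInfo a_info(TensorShape(32U, 32U), 1, DataType::F32);
    TensorInfo b_info(TensorShape(32U, 32U), 1, DataType::F32);
    TensorInfo out_info(TensorShape(32U, 32U), 1, DataType::F32);
    // Check the proposed configuration before configuring the function itself.
    ARM_COMPUTE_ERROR_THROW_ON(CLPixelWiseMultiplication::validate(&a_info, &b_info, &out_info,
                                                                   1.f / 8.f,
                                                                   ConvertPolicy::WRAP,
                                                                   RoundingPolicy::TO_ZERO));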
diff --git a/arm_compute/runtime/CL/functions/CLPoolingLayer.h b/arm_compute/runtime/CL/functions/CLPoolingLayer.h
index f7fd114be1..9c51534f78 100644
--- a/arm_compute/runtime/CL/functions/CLPoolingLayer.h
+++ b/arm_compute/runtime/CL/functions/CLPoolingLayer.h
@@ -50,11 +50,11 @@ public:
void configure(ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayer
*
- * @param[in] input Input's tensor info
- * @param[in] output Output's tensor info
+ * @param[in] input Source tensor info. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
- * @return an expected value
+ * @return an error status
*/
static Error validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
};
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index ca6760d8c2..5bfc832518 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -50,12 +50,11 @@ CLActivationLayerKernel::CLActivationLayerKernel()
void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32, DataType::QASYMM8);
- ARM_COMPUTE_ERROR_ON_MSG((input->info()->data_type() == DataType::QASYMM8) && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
- "For QASYMM8 only lower/upper bounded relu is supported");
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
if(output != nullptr)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), output->info());
// Output auto inizialitation if not yet initialized
auto_init_if_empty(*output->info(),
input->info()->tensor_shape(),
@@ -63,12 +62,10 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
input->info()->data_type(),
input->info()->fixed_point_position(),
input->info()->quantization_info());
-
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
}
+ ARM_COMPUTE_ERROR_THROW_ON(CLActivationLayerKernel::validate(input->info(), (output != nullptr) ? output->info() : nullptr, act_info));
+
const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
const DataType dt = input->info()->data_type();
const int fixed_point_position = input->info()->fixed_point_position();
@@ -156,6 +153,23 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
ICLKernel::configure(win);
}
+Error CLActivationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->data_type() == DataType::QASYMM8) && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
+ "For QASYMM8 only lower/upper bounded relu is supported");
+
+ // Checks performed when output is configured
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+ }
+
+ return Error{};
+}
+
void CLActivationLayerKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
index 65422c2bbf..a7625f4303 100644
--- a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
+++ b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
@@ -68,17 +68,7 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
}
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MSG(output->info()->data_type() == DataType::U8 && (input1->info()->data_type() != DataType::U8 || input2->info()->data_type() != DataType::U8),
- "Output can only be U8 if both inputs are U8");
- if(is_data_type_fixed_point(input1->info()->data_type()) || is_data_type_fixed_point(input2->info()->data_type()) || is_data_type_fixed_point(output->info()->data_type()))
- {
- // Check that all data types are the same and all fixed-point positions are the same
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2, output);
- }
+ ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticAdditionKernel::validate(input1->info(), input2->info(), output->info(), policy));
_input1 = input1;
_input2 = input2;
@@ -119,6 +109,27 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
ICLKernel::configure(win);
}
+Error CLArithmeticAdditionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+{
+ ARM_COMPUTE_UNUSED(policy);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, input2);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2);
+
+ // Validate in case of configured output
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
+ "Output can only be U8 if both inputs are U8");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, output);
+ }
+
+ return Error{};
+}
+
void CLArithmeticAdditionKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/core/CL/kernels/CLArithmeticSubtractionKernel.cpp b/src/core/CL/kernels/CLArithmeticSubtractionKernel.cpp
index c5183af7d7..47d77ad8a9 100644
--- a/src/core/CL/kernels/CLArithmeticSubtractionKernel.cpp
+++ b/src/core/CL/kernels/CLArithmeticSubtractionKernel.cpp
@@ -61,17 +61,7 @@ void CLArithmeticSubtractionKernel::configure(const ICLTensor *input1, const ICL
}
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MSG(output->info()->data_type() == DataType::U8 && (input1->info()->data_type() != DataType::U8 || input2->info()->data_type() != DataType::U8),
- "Output can only be U8 if both inputs are U8");
- if(is_data_type_fixed_point(input1->info()->data_type()) || is_data_type_fixed_point(input2->info()->data_type()) || is_data_type_fixed_point(output->info()->data_type()))
- {
- // Check that all data types are the same and all fixed-point positions are the same
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2, output);
- }
+ ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticSubtractionKernel::validate(input1->info(), input2->info(), output->info(), policy));
_input1 = input1;
_input2 = input2;
@@ -111,6 +101,27 @@ void CLArithmeticSubtractionKernel::configure(const ICLTensor *input1, const ICL
ICLKernel::configure(win);
}
+Error CLArithmeticSubtractionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+{
+ ARM_COMPUTE_UNUSED(policy);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, input2);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2);
+
+ // Validate in case of configured output
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
+ "Output can only be U8 if both inputs are U8");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, output);
+ }
+
+ return Error{};
+}
+
void CLArithmeticSubtractionKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 43f39f423f..f17091166c 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -45,7 +45,7 @@ CLBatchNormalizationLayerKernel::CLBatchNormalizationLayerKernel()
void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
float epsilon)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var, beta, gamma);
_input = input;
_output = output;
@@ -57,21 +57,13 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
if(output != nullptr)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), output->info());
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
-
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, mean, var, beta, gamma);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
- }
- else
- {
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
- ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != mean->info()->dimension(0));
+ ARM_COMPUTE_ERROR_THROW_ON(CLBatchNormalizationLayerKernel::validate(input->info(), (output != nullptr) ? output->info() : nullptr,
+ mean->info(), var->info(), beta->info(), gamma->info(), epsilon));
const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
@@ -108,6 +100,28 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
ICLKernel::configure(win);
}
+Error CLBatchNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon)
+{
+ ARM_COMPUTE_UNUSED(epsilon);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != mean->dimension(0));
+
+ if(output != nullptr && output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+ }
+
+ return Error{};
+}
+
void CLBatchNormalizationLayerKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
index 33c8b81c1d..5e35c8c1ff 100644
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
+++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
@@ -64,20 +64,8 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
}
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MSG(output->info()->data_type() == DataType::U8 && (input1->info()->data_type() != DataType::U8 || input2->info()->data_type() != DataType::U8),
- "Output can only be U8 if both inputs are U8");
- ARM_COMPUTE_ERROR_ON_MSG(scale < 0, "Scale cannot be negative. ");
- if(is_data_type_fixed_point(input1->info()->data_type()))
- {
- // All data types must be all QS8 or all QS16
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_MSG(scale != 1, "Unsupported scaling factor for QS8/QS16. Scale must be 1.");
- }
+ ARM_COMPUTE_ERROR_THROW_ON(CLPixelWiseMultiplicationKernel::validate(input1->info(), input2->info(), output->info(),
+ scale, overflow_policy, rounding_policy));
_input1 = input1;
_input2 = input2;
@@ -178,6 +166,42 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
ICLKernel::configure(win);
}
+Error CLPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+{
+ ARM_COMPUTE_UNUSED(overflow_policy);
+ ARM_COMPUTE_UNUSED(rounding_policy);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, input2);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");
+
+ if(is_data_type_fixed_point(input1->data_type()))
+ {
+ // All data types must be all QS8 or all QS16
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale != 1, "Unsupported scaling factor for QS8/QS16. Scale must be 1.");
+ }
+
+ // Validate in case of configured output
+ if((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
+ "Output can only be U8 if both inputs are U8");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, output);
+ if(is_data_type_fixed_point(input1->data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
+ }
+ }
+
+ return Error{};
+}
+
void CLPixelWiseMultiplicationKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 19d36a6ac6..2854cd8265 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -65,9 +65,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_NULLPTR(output);
- ARM_COMPUTE_ERROR_ON(pool_pad_x >= pool_size || pool_pad_y >= pool_size);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Check output dimensions
std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0),
@@ -85,9 +83,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
- ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pooled_w) || (output->info()->dimension(1) != pooled_h));
+ ARM_COMPUTE_ERROR_THROW_ON(CLPoolingLayerKernel::validate(input->info(), output->info(), pool_info));
const int input_width = input->info()->dimension(0);
const int input_height = input->info()->dimension(1);
diff --git a/src/runtime/CL/functions/CLActivationLayer.cpp b/src/runtime/CL/functions/CLActivationLayer.cpp
index fbb90d9a8b..5369a59211 100644
--- a/src/runtime/CL/functions/CLActivationLayer.cpp
+++ b/src/runtime/CL/functions/CLActivationLayer.cpp
@@ -35,3 +35,8 @@ void CLActivationLayer::configure(ICLTensor *input, ICLTensor *output, Activatio
k->configure(input, output, act_info);
_kernel = std::move(k);
}
+
+Error CLActivationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+{
+ return CLActivationLayerKernel::validate(input, output, act_info);
+}
diff --git a/src/runtime/CL/functions/CLArithmeticAddition.cpp b/src/runtime/CL/functions/CLArithmeticAddition.cpp
index 5ca384d6a4..5fa0b8c33a 100644
--- a/src/runtime/CL/functions/CLArithmeticAddition.cpp
+++ b/src/runtime/CL/functions/CLArithmeticAddition.cpp
@@ -36,3 +36,8 @@ void CLArithmeticAddition::configure(const ICLTensor *input1, const ICLTensor *i
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
+
+Error CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+{
+ return CLArithmeticAdditionKernel::validate(input1, input2, output, policy);
+}
diff --git a/src/runtime/CL/functions/CLArithmeticSubtraction.cpp b/src/runtime/CL/functions/CLArithmeticSubtraction.cpp
index 651f51a24a..12a6b80691 100644
--- a/src/runtime/CL/functions/CLArithmeticSubtraction.cpp
+++ b/src/runtime/CL/functions/CLArithmeticSubtraction.cpp
@@ -36,3 +36,8 @@ void CLArithmeticSubtraction::configure(const ICLTensor *input1, const ICLTensor
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
+
+Error CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+{
+ return CLArithmeticSubtractionKernel::validate(input1, input2, output, policy);
+}
diff --git a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
index 68cdaac812..c4e307e541 100644
--- a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
@@ -42,6 +42,14 @@ void CLBatchNormalizationLayer::configure(ICLTensor *input, ICLTensor *output, c
_norm_kernel.configure(input, output, mean, var, beta, gamma, epsilon);
}
+Error CLBatchNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon)
+{
+ return CLBatchNormalizationLayerKernel::validate(input, output, mean, var, beta, gamma, epsilon);
+}
+
void CLBatchNormalizationLayer::run()
{
CLScheduler::get().enqueue(_norm_kernel, true);
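As with the other functions, CLBatchNormalizationLayer::validate() simply defers to the kernel. The per-channel parameters (mean, variance, beta, gamma) are described by 1-D tensor infos whose length matches the input's channel dimension; a sketch reusing the shapes from the test data further down, with an illustrative epsilon:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"

    using namespace arm_compute;

    bool batch_norm_config_is_valid()
    {
        const TensorInfo input(TensorShape(27U, 13U, 2U), 1, DataType::F32);
        const TensorInfo output(TensorShape(27U, 13U, 2U), 1, DataType::F32);
        const TensorInfo params(TensorShape(2U), 1, DataType::F32); // one value per channel

        Error err = CLBatchNormalizationLayer::validate(&input, &output,
                                                        &params, &params, &params, &params,
                                                        0.001f /* epsilon, illustrative */);
        return !bool(err);
    }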
diff --git a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
index 139d466b39..164ff153ed 100644
--- a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
+++ b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
@@ -37,3 +37,9 @@ void CLPixelWiseMultiplication::configure(const ICLTensor *input1, const ICLTens
k->configure(input1, input2, output, scale, overflow_policy, rounding_policy);
_kernel = std::move(k);
}
+
+Error CLPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+{
+ return CLPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
+}
\ No newline at end of file
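The pixel-wise multiplication check also covers the scale and policy arguments (the test data below rejects a negative scale, for example). A metadata-only sketch for U8 * U8 producing S16; the scale of 1.f and the policy choices are illustrative:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"

    using namespace arm_compute;

    bool u8_multiplication_is_supported()
    {
        const TensorInfo input1(TensorShape(27U, 13U, 2U), 1, DataType::U8);
        const TensorInfo input2(TensorShape(27U, 13U, 2U), 1, DataType::U8);
        const TensorInfo output(TensorShape(27U, 13U, 2U), 1, DataType::S16);

        Error err = CLPixelWiseMultiplication::validate(&input1, &input2, &output, 1.f,
                                                        ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO);
        return !bool(err);
    }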
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 83bd2d0a3a..becfb847e6 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -156,6 +156,41 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(conca
}
}
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Unsupported activation
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ })),
+ framework::dataset::make("Expected", { true, false, false, true, true, true, false })),
+ input_info, output_info, act_info, expected)
+{
+ ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info, &output_info, act_info)) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
template <typename T>
using CLActivationLayerFixture = ActivationValidationFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 51257b4b88..19ccdaf111 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -61,6 +61,38 @@ const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::m
TEST_SUITE(CL)
TEST_SUITE(ArithmeticAddition)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("Input1Info", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ }),
+ framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S16),
+ TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S16),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("Expected", { false, false, true, true, true, false })),
+ input1_info, input2_info, output_info, expected)
+{
+ ARM_COMPUTE_EXPECT(bool(CLArithmeticAddition::validate(&input1_info, &input2_info, &output_info, ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
template <typename T>
using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp
index 5e7d741009..a068c8a357 100644
--- a/tests/validation/CL/ArithmeticSubtraction.cpp
+++ b/tests/validation/CL/ArithmeticSubtraction.cpp
@@ -68,6 +68,38 @@ const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset
TEST_SUITE(CL)
TEST_SUITE(ArithmeticSubtraction)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("Input1Info", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ }),
+ framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S16),
+ TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S16),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("Expected", { false, false, true, true, true, false })),
+ input1_info, input2_info, output_info, expected)
+{
+ ARM_COMPUTE_EXPECT(bool(CLArithmeticSubtraction::validate(&input1_info, &input2_info, &output_info, ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
template <typename T1, typename T2 = T1, typename T3 = T1>
using CLArithmeticSubtractionFixture = ArithmeticSubtractionValidationFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T1, T2, T3>;
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index 69f8d7b635..c29a400402 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -78,6 +78,43 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
validate(dst.info()->valid_region(), valid_region);
}
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
+ TensorInfo(TensorShape(2U), 1, DataType::F16),
+ TensorInfo(TensorShape(2U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U), 1, DataType::F32),
+ TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
+ TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("Expected", { false, true, true, true, true, false})),
+ input_info, output_info, mvbg_info, expected)
+{
+ auto mean_info = mvbg_info;
+ auto var_info = mvbg_info;
+ auto beta_info = mvbg_info;
+ auto gamma_info = mvbg_info;
+ bool has_error = bool(CLBatchNormalizationLayer::validate(&input_info, &output_info, &mean_info, &var_info, &beta_info, &gamma_info, 1.f));
+ ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
diff --git a/tests/validation/CL/PixelWiseMultiplication.cpp b/tests/validation/CL/PixelWiseMultiplication.cpp
index 66f3ff6375..d49462a7f7 100644
--- a/tests/validation/CL/PixelWiseMultiplication.cpp
+++ b/tests/validation/CL/PixelWiseMultiplication.cpp
@@ -90,6 +90,46 @@ using CLFixedPointPixelWiseMultiplicationFixture = FixedPointPixelWiseMultiplica
TEST_SUITE(CL)
TEST_SUITE(PixelWiseMultiplication)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+ framework::dataset::make("Input1Info", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Invalid scale
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching data type
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Invalid scale
+ }),
+ framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S16),
+ TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 2),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S16),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),
+ })),
+ framework::dataset::make("Scale",{ 2.f, 2.f, -1.f, 1.f, 1.f, 1.f, 1.f, 3.f})),
+ framework::dataset::make("Expected", { false, false, true, true, true, true, true, true })),
+ input1_info, input2_info, output_info, scale, expected)
+{
+ bool has_error = bool(CLPixelWiseMultiplication::validate(&input1_info, &input2_info, &output_info, scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO));
+ ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
TEST_SUITE(F16toF16)
TEST_SUITE(Scale255)