author     Georgios Pinitas <georgios.pinitas@arm.com>    2017-11-03 19:01:44 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:35:24 +0000
commit     f9d3a0a12ede4db89348fd924274c9acc6809bb2 (patch)
tree       73080e4a230c4db248d6f9e16181eab09c8168dc /arm_compute/core
parent     d6afedc775220f17317f1835a4d18b72a54525de (diff)
download   ComputeLibrary-f9d3a0a12ede4db89348fd924274c9acc6809bb2.tar.gz
COMPMID-617: Add validation functions.
Added validation routines to the following kernels:
 - CLActivationLayer
 - CLBatchNormalizationLayer
 - CLArithmeticAddition
 - CLArithmeticSubtraction
 - CLPixelwiseMultiplication

Change-Id: I0f3a03154f9e392279f715af656683cd0ad4cef5
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94595
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute/core')
-rw-r--r--  arm_compute/core/CL/kernels/CLActivationLayerKernel.h         10
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h      10
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h   10
-rw-r--r--  arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h 18
-rw-r--r--  arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h 14
-rw-r--r--  arm_compute/core/CL/kernels/CLPoolingLayerKernel.h             6
-rw-r--r--  arm_compute/core/Error.h                                        4
-rw-r--r--  arm_compute/core/Validate.h                                    44
8 files changed, 89 insertions, 27 deletions
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index dab133f05a..30bf6fb0d6 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -56,6 +56,16 @@ public:
* @param[in] act_info Activation layer information.
*/
void configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayerKernel
+ *
+ * @param[in] input Source tensor info. If @p output is nullptr, the activation is applied in place and this tensor
+ * will store the result. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data type supported: same as @p input
+ * @param[in] act_info Activation layer information.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
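A minimal caller-side sketch of the new static check (shapes and the helper function name are illustrative, not part of this patch):

    #include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    void check_activation_config()
    {
        // Tensor metadata only; no OpenCL buffers are allocated for validation.
        TensorInfo src(TensorShape(32U, 32U, 16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(32U, 32U, 16U), 1, DataType::F32);
        ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::RELU);

        // Static check before any CLActivationLayerKernel is instantiated or configured.
        Error err = CLActivationLayerKernel::validate(&src, &dst, act);
        ARM_COMPUTE_ERROR_THROW_ON(err);
    }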
diff --git a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
index 0895fe3f79..aa4b9d6c5f 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
@@ -59,6 +59,16 @@ public:
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAdditionKernel
+ *
+ * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
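The same pattern applies here; a brief sketch (assuming the includes and using-declaration from the activation example above) exercising the mixed-type case described in the type table:

    // U8 + S16 accumulated into S16, with wrap-around on overflow.
    TensorInfo in1(TensorShape(64U, 64U), 1, DataType::U8);
    TensorInfo in2(TensorShape(64U, 64U), 1, DataType::S16);
    TensorInfo out(TensorShape(64U, 64U), 1, DataType::S16);
    ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticAdditionKernel::validate(&in1, &in2, &out, ConvertPolicy::WRAP));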
diff --git a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
index d7755d5e31..13136d93e0 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
@@ -61,6 +61,16 @@ public:
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtractionKernel
+ *
+ * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 26825efba9..a24432145a 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -63,6 +63,24 @@ public:
* @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
*/
void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayerKernel
+ *
+ * @param[in] input Source tensor info. If @p output is nullptr, this tensor will store the result.
+ * 3 lower dimensions represent a single input with dimensions [width, height, FM].
+ * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+ * @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] beta Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+ * @param[in] epsilon Small value to avoid division with zero.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
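A sketch of how a caller might validate an in-place batch normalization (per the note above, passing nullptr as the output info; shapes and epsilon are illustrative):

    TensorInfo input(TensorShape(28U, 28U, 32U), 1, DataType::F16);
    TensorInfo mean(TensorShape(32U), 1, DataType::F16);
    TensorInfo var(TensorShape(32U), 1, DataType::F16);
    TensorInfo beta(TensorShape(32U), 1, DataType::F16);
    TensorInfo gamma(TensorShape(32U), 1, DataType::F16);

    // nullptr output requests in-place validation; the result would be written back to input.
    Error err = CLBatchNormalizationLayerKernel::validate(&input, nullptr, &mean, &var, &beta, &gamma, 0.001f);
    ARM_COMPUTE_ERROR_THROW_ON(err);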
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index 309a202df3..ed876df06f 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -59,6 +59,20 @@ public:
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel
+ *
+ * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1.
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[in] scale Scale to apply after multiplication.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
+ * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
+ *
+ * @return an error status
+ */
+ static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
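A brief sketch (same assumed setup as the earlier examples); a scale of 1.f satisfies the 1/2^n rule with n = 0:

    TensorInfo in1(TensorShape(128U, 128U), 1, DataType::F32);
    TensorInfo in2(TensorShape(128U, 128U), 1, DataType::F32);
    TensorInfo out(TensorShape(128U, 128U), 1, DataType::F32);
    Error err = CLPixelWiseMultiplicationKernel::validate(&in1, &in2, &out, 1.f,
                                                          ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
    ARM_COMPUTE_ERROR_THROW_ON(err);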
diff --git a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
index 29190e2dd1..a9159a4bb8 100644
--- a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
@@ -60,11 +60,11 @@ public:
void configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayerKernel
*
- * @param[in] input Input's tensor info
- * @param[in] output Output's tensor info
+ * @param[in] input Source tensor info. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
- * @return an error
+ * @return an error status
*/
static Error validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
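The pooling kernel's validate(), already present and documented above, follows the same convention; a short sketch with an illustrative 2x2 max pooling:

    TensorInfo src(TensorShape(16U, 16U, 8U), 1, DataType::F32);
    TensorInfo dst(TensorShape(8U, 8U, 8U), 1, DataType::F32);
    PoolingLayerInfo pool_info(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0));
    ARM_COMPUTE_ERROR_THROW_ON(CLPoolingLayerKernel::validate(&src, &dst, pool_info));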
diff --git a/arm_compute/core/Error.h b/arm_compute/core/Error.h
index 6e4aa6a4f8..fa3f9c0615 100644
--- a/arm_compute/core/Error.h
+++ b/arm_compute/core/Error.h
@@ -240,7 +240,7 @@ Error create_error(ErrorCode error_code, const char *function, const char *file,
*
* @param[in] error Error value to check.
*/
-#define ARM_COMPUTE_ERROR_THROW(error) \
+#define ARM_COMPUTE_ERROR_THROW_ON(error) \
error.throw_if_error();
/** If the condition is true, the given message is printed and an exception is thrown
@@ -282,7 +282,7 @@ Error create_error(ErrorCode error_code, const char *function, const char *file,
*/
#define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) (cond) ? throw std::logic_error(msg) : val;
#else /* ARM_COMPUTE_ASSERTS_ENABLED */
-#define ARM_COMPUTE_ERROR_THROW(error)
+#define ARM_COMPUTE_ERROR_THROW_ON(error)
#define ARM_COMPUTE_ERROR_ON_MSG(cond, ...)
#define ARM_COMPUTE_ERROR_ON_LOC_MSG(cond, func, file, line, ...)
#define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) val
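The rename makes the pairing with the non-throwing macros explicit. A sketch of the intended split (the kernel name is hypothetical):

    // Inside configure(): fail fast. With ARM_COMPUTE_ASSERTS_ENABLED the error throws;
    // otherwise the macro expands to nothing and the check is compiled out.
    ARM_COMPUTE_ERROR_THROW_ON(CLSomeKernel::validate(input->info(), output->info()));

    // Inside a validate() implementation: propagate the error to the caller instead of throwing.
    ARM_COMPUTE_RETURN_ON_ERROR(CLSomeKernel::validate(input, output));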
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index 1e2d262683..2ca9f6b64e 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -142,7 +142,7 @@ inline arm_compute::Error error_on_nullptr(const char *function, const char *fil
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -164,7 +164,7 @@ inline arm_compute::Error error_on_nullptr(const char *function, const char *fil
arm_compute::Error error_on_mismatching_windows(const char *function, const char *file, const int line,
const Window &full, const Window &win);
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
@@ -186,7 +186,7 @@ arm_compute::Error error_on_mismatching_windows(const char *function, const char
arm_compute::Error error_on_invalid_subwindow(const char *function, const char *file, const int line,
const Window &full, const Window &sub);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBWINDOW(f, s) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
@@ -206,7 +206,7 @@ arm_compute::Error error_on_invalid_subwindow(const char *function, const char *
arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *function, const char *file, const int line,
const Window &full, const Window &window, const int dim);
#define ARM_COMPUTE_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
#define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
@@ -225,7 +225,7 @@ arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *func
arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, const char *file, const int line,
const Coordinates &pos, unsigned int max_dim);
#define ARM_COMPUTE_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
#define ARM_COMPUTE_RETURN_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
@@ -244,7 +244,7 @@ arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, con
arm_compute::Error error_on_window_dimensions_gte(const char *function, const char *file, const int line,
const Window &win, unsigned int max_dim);
#define ARM_COMPUTE_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
#define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
@@ -267,7 +267,7 @@ arm_compute::Error error_on_mismatching_dimensions(const char *function, const c
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -357,7 +357,7 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -408,7 +408,7 @@ inline arm_compute::Error error_on_mismatching_data_types(const char *function,
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -432,7 +432,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
DataType &&first_data_type = tensor_info_1->data_type();
const int first_fixed_point_position = tensor_info_1->fixed_point_position();
- if((first_data_type != DataType::QS8) && (first_data_type != DataType::QS16))
+ if(!is_data_type_fixed_point(first_data_type))
{
return arm_compute::Error{};
}
@@ -473,7 +473,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -556,7 +556,7 @@ inline arm_compute::Error error_on_data_type_not_in(const char *function, const
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
@@ -602,7 +602,7 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c, ...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c, ...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
@@ -618,7 +618,7 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
arm_compute::Error error_on_tensor_not_2d(const char *function, const char *file, const int line,
const ITensor *tensor);
#define ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(t) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
#define ARM_COMPUTE_RETURN_ERROR_ON_TENSOR_NOT_2D(t) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
@@ -648,7 +648,7 @@ inline arm_compute::Error error_on_channel_not_in(const char *function, const ch
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN(c, ...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_CHANNEL_NOT_IN(c, ...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
@@ -665,7 +665,7 @@ inline arm_compute::Error error_on_channel_not_in(const char *function, const ch
arm_compute::Error error_on_channel_not_in_known_format(const char *function, const char *file, const int line,
Format fmt, Channel cn);
#define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
#define ARM_COMPUTE_RETURN_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
@@ -687,7 +687,7 @@ arm_compute::Error error_on_channel_not_in_known_format(const char *function, co
arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *file, const int line,
const IMultiHOG *multi_hog);
#define ARM_COMPUTE_ERROR_ON_INVALID_MULTI_HOG(m) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_MULTI_HOG(m) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
@@ -701,7 +701,7 @@ arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *
arm_compute::Error error_on_unconfigured_kernel(const char *function, const char *file, const int line,
const IKernel *kernel);
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
#define ARM_COMPUTE_RETURN_ERROR_ON_UNCONFIGURED_KERNEL(k) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
@@ -719,7 +719,7 @@ arm_compute::Error error_on_unconfigured_kernel(const char *function, const char
arm_compute::Error error_on_invalid_subtensor(const char *function, const char *file, const int line,
const TensorShape &parent_shape, const Coordinates &coords, const TensorShape &shape);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
@@ -736,7 +736,7 @@ arm_compute::Error error_on_invalid_subtensor(const char *function, const char *
arm_compute::Error error_on_invalid_subtensor_valid_region(const char *function, const char *file, const int line,
const ValidRegion &parent_valid_region, const ValidRegion &valid_region);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
@@ -783,7 +783,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -827,7 +827,7 @@ inline arm_compute::Error error_on_value_not_representable_in_fixed_point(const
return arm_compute::Error{};
}
#define ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(...) \
- ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
+ ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
#define ARM_COMPUTE_RETURN_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(...) \
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
}
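Taken together, the *_RETURN_ERROR_ON_* variants above are what the new kernel validate() functions are built from, while the *_ERROR_ON_* variants keep throwing inside configure(). A minimal sketch of such a composition (the kernel name and the particular checks are illustrative, not the ones added by this patch):

    Error CLExampleKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
        return Error{};
    }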