author     Georgios Pinitas <georgios.pinitas@arm.com>   2017-12-06 11:53:03 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:42:17 +0000
commit     631c41a4e3645a948b0f597caa77e8fa91ca0efc (patch)
tree       164fd113818b8e890fc16bad97240056cb71e747 /arm_compute/core
parent     57f249b08fd65af761d5c8bfe62de117d67a14c7 (diff)
download   ComputeLibrary-631c41a4e3645a948b0f597caa77e8fa91ca0efc.tar.gz
COMPMID-556: Rename Error to Status and inverse logic
Change-Id: Ib57d4f7177cc6179302bda7ad870acb8bd3825f5
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112115
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute/core')
-rw-r--r--  arm_compute/core/CL/kernels/CLActivationLayerKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h | 10
-rw-r--r--  arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h | 6
-rw-r--r--  arm_compute/core/CL/kernels/CLPoolingLayerKernel.h | 4
-rw-r--r--  arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h | 16
-rw-r--r--  arm_compute/core/CL/kernels/CLTransposeKernel.h | 4
-rw-r--r--  arm_compute/core/CPP/kernels/CPPPermuteKernel.h | 4
-rw-r--r--  arm_compute/core/Error.h | 70
-rw-r--r--  arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h | 10
-rw-r--r--  arm_compute/core/NEON/kernels/NECol2ImKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h | 8
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEIm2ColKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/NETransposeKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h | 2
-rw-r--r--  arm_compute/core/Validate.h | 242
-rw-r--r--  arm_compute/core/utils/quantization/AsymmHelpers.h | 8
32 files changed, 236 insertions(+), 228 deletions(-)
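
The net effect on call sites: every static validate() now returns a Status whose boolean conversion is true on success, whereas the old Error converted to true on failure. Below is a minimal, self-contained sketch of the new caller-side pattern; the Status class and validate_shapes() are simplified stand-ins written for illustration, not the library headers.

#include <iostream>
#include <string>
#include <utility>

// Simplified stand-in for arm_compute::Status after this patch:
// bool conversion is now true when the code is OK.
enum class ErrorCode { OK, RUNTIME_ERROR };

class Status
{
public:
    Status() : _code(ErrorCode::OK) {}
    Status(ErrorCode code, std::string desc) : _code(code), _error_description(std::move(desc)) {}
    explicit operator bool() const noexcept { return _code == ErrorCode::OK; } // inverted logic
    std::string error_description() const { return _error_description; }
private:
    ErrorCode   _code;
    std::string _error_description;
};

// Hypothetical validate() in the new style: return an OK Status on success.
Status validate_shapes(int in_size, int out_size)
{
    if(in_size != out_size)
    {
        return Status(ErrorCode::RUNTIME_ERROR, "Tensors have different shapes");
    }
    return Status{};
}

int main()
{
    const Status status = validate_shapes(16, 8);
    if(!status) // false now means "there is an error"
    {
        std::cout << "Validation failed: " << status.error_description() << std::endl;
    }
    return 0;
}
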
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index 30bf6fb0d6..5b6c44cddf 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -63,9 +63,9 @@ public:
* @param[in] output Destination tensor info. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
index aa4b9d6c5f..96b8dc8d48 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
@@ -66,9 +66,9 @@ public:
* @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
index 13136d93e0..c5f862a61f 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
@@ -68,9 +68,9 @@ public:
* @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index a5559bf8aa..8643d83bcc 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -75,12 +75,12 @@ public:
* @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] epsilon Small value to avoid division with zero.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output,
- const ITensorInfo *mean, const ITensorInfo *var,
- const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
index 2d8c3a337e..d47b7da213 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
@@ -80,9 +80,9 @@ public:
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
* @param[in] target Target GPU architecture.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
index a1c6a1f7e1..49e19e3c63 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
@@ -79,9 +79,9 @@ public:
* @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
* Along with @p min, this value can be used to implement "rectified linear unit" activation functions
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
index 08554983d6..87b70efdf5 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
@@ -80,9 +80,9 @@ public:
* @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
* Along with @p min, this value can be used to implement "rectified linear unit" activation functions
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
index 38daac69b1..d931152cb9 100644
--- a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
@@ -60,9 +60,9 @@ public:
* @param[in] output Destination tensor. Output will have the same number of dimensions as input. Data types supported: same as @p input.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index ed876df06f..6746a49dde 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -69,10 +69,10 @@ public:
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
index ffb5d79514..e9ce28b3f9 100644
--- a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
@@ -64,9 +64,9 @@ public:
* @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
index b0adb67578..c072d2a6de 100644
--- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
@@ -47,9 +47,9 @@ public:
* @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
* @param[in] output Destination tensor. Data types supported: same as @p input
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
};
/** Interface for shifting, exponentiating and summing the logits */
@@ -82,9 +82,9 @@ public:
* @param[in] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
* @param[in] sum Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -129,9 +129,9 @@ public:
* @param[in] output Destination tensor. Data types supported: same as @p input
* @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p input
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
/** Checks if the given size is eligible for parallel reduction
*
* @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
@@ -186,9 +186,9 @@ public:
* @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
* @param[in] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLTransposeKernel.h b/arm_compute/core/CL/kernels/CLTransposeKernel.h
index e7367caf72..2e1b481d3f 100644
--- a/arm_compute/core/CL/kernels/CLTransposeKernel.h
+++ b/arm_compute/core/CL/kernels/CLTransposeKernel.h
@@ -49,9 +49,9 @@ public:
* @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
* @param[in] output Output tensor. Data type supported: Same as @p input
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLTRANSPOSEKERNEL_H__ */
diff --git a/arm_compute/core/CPP/kernels/CPPPermuteKernel.h b/arm_compute/core/CPP/kernels/CPPPermuteKernel.h
index 31011e4a5c..0e7c93877e 100644
--- a/arm_compute/core/CPP/kernels/CPPPermuteKernel.h
+++ b/arm_compute/core/CPP/kernels/CPPPermuteKernel.h
@@ -63,9 +63,9 @@ public:
* @param[in] output The output tensor. Data types supported: Same as @p input
* @param[in] perm Permutation vector
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/Error.h b/arm_compute/core/Error.h
index bd80aa4162..97dbba3fab 100644
--- a/arm_compute/core/Error.h
+++ b/arm_compute/core/Error.h
@@ -35,39 +35,39 @@ enum class ErrorCode
RUNTIME_ERROR /**< Generic runtime error */
};
-/** Error class */
-class Error
+/** Status class */
+class Status
{
public:
/** Default Constructor **/
- Error()
- : _code(ErrorCode::OK), _description(" ")
+ Status()
+ : _code(ErrorCode::OK), _error_description(" ")
{
}
/** Default Constructor
*
* @param error_status Error status.
- * @param error_description Error description if error_status is not valid.
+ * @param error_description (Optional) Error description if error_status is not valid.
*/
- explicit Error(ErrorCode error_status, std::string error_description = " ")
- : _code(error_status), _description(error_description)
+ explicit Status(ErrorCode error_status, std::string error_description = " ")
+ : _code(error_status), _error_description(error_description)
{
}
/** Allow instances of this class to be copy constructed */
- Error(const Error &) = default;
+ Status(const Status &) = default;
/** Allow instances of this class to be move constructed */
- Error(Error &&) = default;
+ Status(Status &&) = default;
/** Allow instances of this class to be copy assigned */
- Error &operator=(const Error &) = default;
+ Status &operator=(const Status &) = default;
/** Allow instances of this class to be move assigned */
- Error &operator=(Error &&) = default;
+ Status &operator=(Status &&) = default;
/** Explicit bool conversion operator
*
- * @return True if there is a valid error else false if status is OK.
+ * @return True if there is no error else false
*/
explicit operator bool() const noexcept
{
- return _code != ErrorCode::OK;
+ return _code == ErrorCode::OK;
}
/** Gets error code
*
@@ -81,14 +81,14 @@ public:
*
* @return Error description.
*/
- std::string description() const
+ std::string error_description() const
{
- return _description;
+ return _error_description;
}
/** Throws a runtime exception in case it contains a valid error status */
void throw_if_error()
{
- if(bool(*this))
+ if(!bool(*this))
{
internal_throw_on_error();
}
@@ -100,7 +100,7 @@ private:
private:
ErrorCode _code;
- std::string _description;
+ std::string _error_description;
};
/** Creates an error containing the error message from variable argument list
@@ -111,8 +111,10 @@ private:
* @param[in] line Line on which the error occurred.
* @param[in] msg Message to display before aborting.
* @param[in] args Variable argument list of the message.
+ *
+ * @return status containing the error
*/
-Error create_error_va_list(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, va_list args);
+Status create_error_va_list(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, va_list args);
/** Creates an error containing the error message
*
* @param[in] error_code Error code
@@ -121,8 +123,10 @@ Error create_error_va_list(ErrorCode error_code, const char *function, const cha
* @param[in] line Line on which the error occurred.
* @param[in] msg Message to display before aborting.
* @param[in] ... Variable number of arguments of the message.
+ *
+ * @return status containing the error
*/
-Error create_error(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, ...);
+Status create_error(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, ...);
/** Print an error message then throw an std::runtime_error
*
* @param[in] function Function in which the error occurred.
@@ -159,17 +163,17 @@ Error create_error(ErrorCode error_code, const char *function, const char *file,
*/
#define ARM_COMPUTE_CREATE_ERROR_LOC(error_code, func, file, line, ...) ::arm_compute::create_error(error_code, func, file, line, __VA_ARGS__) // NOLINT
-/** Checks if an error value is valid if not returns
+/** Checks if a status contains an error and returns it
*
- * @param[in] error Error value to check
+ * @param[in] status Status value to check
*/
-#define ARM_COMPUTE_RETURN_ON_ERROR(error) \
- do \
- { \
- if(bool(error)) \
- { \
- return error; \
- } \
+#define ARM_COMPUTE_RETURN_ON_ERROR(status) \
+ do \
+ { \
+ if(!bool(status)) \
+ { \
+ return status; \
+ } \
} while(false)
/** Checks if an error value is valid if not throws an exception with the error
@@ -243,12 +247,12 @@ Error create_error(ErrorCode error_code, const char *function, const char *file,
#define ARM_COMPUTE_ERROR_LOC(func, file, line, ...) ::arm_compute::error(func, file, line, __VA_ARGS__) // NOLINT
#ifdef ARM_COMPUTE_ASSERTS_ENABLED
-/** Checks if an error value is valid if not throws an exception with the error
+/** Checks if a status value is valid if not throws an exception with the error
*
- * @param[in] error Error value to check.
+ * @param[in] status Status value to check.
*/
-#define ARM_COMPUTE_ERROR_THROW_ON(error) \
- error.throw_if_error()
+#define ARM_COMPUTE_ERROR_THROW_ON(status) \
+ status.throw_if_error()
/** If the condition is true, the given message is printed and an exception is thrown
*
@@ -289,7 +293,7 @@ Error create_error(ErrorCode error_code, const char *function, const char *file,
*/
#define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) (cond) ? throw std::logic_error(msg) : val;
#else /* ARM_COMPUTE_ASSERTS_ENABLED */
-#define ARM_COMPUTE_ERROR_THROW_ON(error)
+#define ARM_COMPUTE_ERROR_THROW_ON(status)
#define ARM_COMPUTE_ERROR_ON_MSG(cond, ...)
#define ARM_COMPUTE_ERROR_ON_LOC_MSG(cond, func, file, line, ...)
#define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) val
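
The Error.h hunks above invert the meaning of operator bool, and ARM_COMPUTE_RETURN_ON_ERROR and throw_if_error now act on !bool(status). A small sketch of how the pieces compose inside a validate() implementation, using simplified stand-ins (RETURN_ON_ERROR mirrors the updated ARM_COMPUTE_RETURN_ON_ERROR; none of this is the actual library code):

#include <stdexcept>
#include <string>

// Simplified stand-ins mirroring the new semantics (illustrative only).
enum class ErrorCode { OK, RUNTIME_ERROR };

struct Status
{
    ErrorCode   code{ ErrorCode::OK };
    std::string msg{};
    explicit operator bool() const noexcept { return code == ErrorCode::OK; }
    void throw_if_error() const
    {
        if(!bool(*this)) { throw std::runtime_error(msg); } // throws only when an error is held
    }
};

// Return early when a sub-check reports an error, matching the updated macro.
#define RETURN_ON_ERROR(status) \
    do { if(!bool(status)) { return status; } } while(false)

Status check_not_null(const void *ptr)
{
    return ptr ? Status{} : Status{ ErrorCode::RUNTIME_ERROR, "Nullptr object!" };
}

Status validate(const void *input, const void *output)
{
    RETURN_ON_ERROR(check_not_null(input));  // each check returns OK or an error Status
    RETURN_ON_ERROR(check_not_null(output));
    return Status{};                         // all checks passed
}

int main()
{
    int in = 0, out = 0;
    validate(&in, &out).throw_if_error(); // nothing thrown: both checks are OK
    return 0;
}
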
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
index 044cf6846b..b830e022d7 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
@@ -75,9 +75,9 @@ public:
* @param[in] output The output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
* @param[in] policy Overflow policy.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
index 663f62864d..af81d396bb 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
@@ -75,9 +75,9 @@ public:
* @param[in] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
index f5f818c083..f3c5574e76 100644
--- a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
@@ -74,12 +74,12 @@ public:
* @param[in] gamma Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] epsilon Small value to avoid division with zero.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output,
- const ITensorInfo *mean, const ITensorInfo *var,
- const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NECol2ImKernel.h b/arm_compute/core/NEON/kernels/NECol2ImKernel.h
index 960e3021db..243cc77a4b 100644
--- a/arm_compute/core/NEON/kernels/NECol2ImKernel.h
+++ b/arm_compute/core/NEON/kernels/NECol2ImKernel.h
@@ -81,9 +81,9 @@ public:
* while the rest represent batch of outputs. Data types supported: Same as @p input
* @param[in] convolved_dims Output convolved dimensions.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h b/arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
index db719caccb..fd93def0cb 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
@@ -65,9 +65,9 @@ public:
* @param[in] input Input tensor info. Data types supported: U8/S8/QS8/QASYMM8/QS16/U16/S16/F16/U32/S32/F32
* @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
index 1a5b0fb863..b9bb18d2b5 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
@@ -58,9 +58,9 @@ public:
* @param[in] block_width The width of the blocks to be interleaved.
* @param[in] transpose True if transpose operation must be performed, false otherwise.
*
- * @return an error status
+ * @return a status
*/
- Error validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int block_height, unsigned int block_width, bool transpose);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int block_height, unsigned int block_width, bool transpose);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index d9986b6cdd..7435994b8a 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -69,9 +69,9 @@ public:
* @param[in] input1 Input tensor info containing the transposed Matrix B. Data type supported: same as @p input0
* @param[in] output Output tensor info to store the result of matrix multiplication. Data type supported: S32
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
index ac0af7cff3..531968304f 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
@@ -78,9 +78,9 @@ public:
* @param[in] a_offset Offset to be added to each element of the matrix A.
* @param[in] b_offset Offset to be added to each element of the matrix B.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, int32_t a_offset, int32_t b_offset);
+ static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, int32_t a_offset, int32_t b_offset);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
index 24ba54ebdf..b1dd1fb2d5 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
@@ -80,9 +80,9 @@ public:
* @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
* Along with @p min, this value can be used to implement "rectified linear unit" activation functions
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
index d873a889d2..10b333032e 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
@@ -80,9 +80,9 @@ public:
* @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
* Along with @p min, this value can be used to implement "rectified linear unit" activation functions
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
index 50d8b4070e..38c353e293 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
@@ -84,9 +84,9 @@ public:
* @param[in] num_mtx_a_cols Number of matrix A columns
* @param[in] is_interleaved4x4 True if the matrix A has been interleaved4x4
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4);
+ static Status validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
@@ -115,9 +115,9 @@ public:
* @param[in] num_mtx_b_rows Number of matrix B rows
* @param[in] is_transposed1xW True if the input tensor is transposed 1xW
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW);
+ static Status validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h b/arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
index 4436d1fdb0..e8ee2a7d29 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
@@ -79,9 +79,9 @@ public:
* @param[in] input Input tensor info. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
* @param[in] output Output tensor info. Data type supported: same as @p input.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h
index 5f9df5136b..bc12b22e59 100644
--- a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h
+++ b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h
@@ -91,9 +91,9 @@ public:
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
* @param[in] has_bias In case biases are provided expands the matrix with 1.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index 90b658e354..05eb8d6ddc 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -72,9 +72,9 @@ public:
* @param[in] overflow_policy Overflow policy.
* @param[in] rounding_policy Rounding policy.
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/NETransposeKernel.h b/arm_compute/core/NEON/kernels/NETransposeKernel.h
index 66dc6b1989..855d270e49 100644
--- a/arm_compute/core/NEON/kernels/NETransposeKernel.h
+++ b/arm_compute/core/NEON/kernels/NETransposeKernel.h
@@ -62,9 +62,9 @@ public:
* @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
* @param[in] output Output tensor. Data type supported: Same as @p input
*
- * @return an error status
+ * @return a status
*/
- static Error validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
index 66684a1185..b03e5fa1a2 100644
--- a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
+++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
@@ -48,7 +48,7 @@ public:
* @param[in] output Output tensor info to store the result of matrix multiplication.
* If @p beta is not zero the values are multiplied by @p beta before the result is accumulated. Otherwise the values are overwritten by the result. Data types supported: S32
*/
- static Error validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output);
protected:
void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) override;
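
The Validate.h diff that follows switches the error_on_* helpers and the variadic for_each_error utility to return Status as well, so individual checks can be chained and short-circuit on the first failure. A minimal sketch of that pattern under the new semantics (simplified types and names, for illustration only):

#include <iostream>
#include <string>

// Simplified Status with the new "true means OK" semantics (illustrative only).
struct Status
{
    bool        ok{ true };
    std::string msg{};
    explicit operator bool() const noexcept { return ok; }
};

#define RETURN_ON_ERROR(s) \
    do { if(!bool(s)) { return s; } } while(false)

// Base case: no arguments left to check, so report OK.
template <typename F>
Status for_each_error(F &&)
{
    return Status{};
}

// Apply the functor to each argument in turn, stopping at the first error,
// mirroring the variadic helper pattern in Validate.h.
template <typename F, typename T, typename... Ts>
Status for_each_error(F &&func, T &&arg, Ts &&... args)
{
    RETURN_ON_ERROR(func(arg));
    RETURN_ON_ERROR(for_each_error(func, args...));
    return Status{};
}

int main()
{
    const int reference = 4;
    auto same_as_reference = [&](int v)
    {
        return v == reference ? Status{} : Status{ false, "Objects have different dimensions" };
    };
    const Status status = for_each_error(same_as_reference, 4, 4, 5);
    std::cout << (bool(status) ? "all match" : status.msg) << std::endl; // prints the error message
    return 0;
}
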
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index 227c3e7d69..4ef0e11433 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -81,11 +81,11 @@ public:
*
* @param[in] dim To be compared object.
*/
- arm_compute::Error operator()(const Dimensions<T> &dim)
+ arm_compute::Status operator()(const Dimensions<T> &dim)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(have_different_dimensions(_dim, dim, 0), _function, _file, _line,
"Objects have different dimensions");
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
private:
@@ -96,17 +96,17 @@ private:
};
template <typename F>
-inline arm_compute::Error for_each_error(F &&)
+inline arm_compute::Status for_each_error(F &&)
{
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
template <typename F, typename T, typename... Ts>
-inline arm_compute::Error for_each_error(F &&func, T &&arg, Ts &&... args)
+inline arm_compute::Status for_each_error(F &&func, T &&arg, Ts &&... args)
{
ARM_COMPUTE_RETURN_ON_ERROR(func(arg));
ARM_COMPUTE_RETURN_ON_ERROR(for_each_error(func, args...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
template <typename T>
@@ -128,10 +128,10 @@ struct get_tensor_info_t<ITensorInfo *>
* @param[in] line Line on which the error occurred.
* @param[in] pointers Pointers to check against nullptr.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_nullptr(const char *function, const char *file, const int line, Ts &&... pointers)
+inline arm_compute::Status error_on_nullptr(const char *function, const char *file, const int line, Ts &&... pointers)
{
const std::array<const void *, sizeof...(Ts)> pointers_array{ { std::forward<Ts>(pointers)... } };
bool has_nullptr = std::any_of(pointers_array.begin(), pointers_array.end(), [&](const void *ptr)
@@ -139,7 +139,7 @@ inline arm_compute::Error error_on_nullptr(const char *function, const char *fil
return (ptr == nullptr);
});
ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(has_nullptr, function, file, line, "Nullptr object!");
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -159,10 +159,10 @@ inline arm_compute::Error error_on_nullptr(const char *function, const char *fil
* @param[in] full Full size window
* @param[in] win Window to validate.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_mismatching_windows(const char *function, const char *file, const int line,
- const Window &full, const Window &win);
+arm_compute::Status error_on_mismatching_windows(const char *function, const char *file, const int line,
+ const Window &full, const Window &win);
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
@@ -181,10 +181,10 @@ arm_compute::Error error_on_mismatching_windows(const char *function, const char
* @param[in] full Full size window
* @param[in] sub Sub-window to validate.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_invalid_subwindow(const char *function, const char *file, const int line,
- const Window &full, const Window &sub);
+arm_compute::Status error_on_invalid_subwindow(const char *function, const char *file, const int line,
+ const Window &full, const Window &sub);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBWINDOW(f, s) \
@@ -201,10 +201,10 @@ arm_compute::Error error_on_invalid_subwindow(const char *function, const char *
* @param[in] window Window to be collapsed.
* @param[in] dim Dimension need to be checked.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *function, const char *file, const int line,
- const Window &full, const Window &window, const int dim);
+arm_compute::Status error_on_window_not_collapsable_at_dimension(const char *function, const char *file, const int line,
+ const Window &full, const Window &window, const int dim);
#define ARM_COMPUTE_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
#define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
@@ -220,10 +220,10 @@ arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *func
* @param[in] pos Coordinates to validate
* @param[in] max_dim Maximum number of dimensions allowed.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, const char *file, const int line,
- const Coordinates &pos, unsigned int max_dim);
+arm_compute::Status error_on_coordinates_dimensions_gte(const char *function, const char *file, const int line,
+ const Coordinates &pos, unsigned int max_dim);
#define ARM_COMPUTE_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
#define ARM_COMPUTE_RETURN_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
@@ -239,10 +239,10 @@ arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, con
* @param[in] win Window to validate
* @param[in] max_dim Maximum number of dimensions allowed.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_window_dimensions_gte(const char *function, const char *file, const int line,
- const Window &win, unsigned int max_dim);
+arm_compute::Status error_on_window_dimensions_gte(const char *function, const char *file, const int line,
+ const Window &win, unsigned int max_dim);
#define ARM_COMPUTE_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
#define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
@@ -257,14 +257,14 @@ arm_compute::Error error_on_window_dimensions_gte(const char *function, const ch
* @param[in] dim2 The second object to be compared.
* @param[in] dims (Optional) Further allowed objects.
*
- * @return Error
+ * @return Status
*/
template <typename T, typename... Ts>
-arm_compute::Error error_on_mismatching_dimensions(const char *function, const char *file, int line,
- const Dimensions<T> &dim1, const Dimensions<T> &dim2, Ts &&... dims)
+arm_compute::Status error_on_mismatching_dimensions(const char *function, const char *file, int line,
+ const Dimensions<T> &dim1, const Dimensions<T> &dim2, Ts &&... dims)
{
ARM_COMPUTE_RETURN_ON_ERROR(detail::for_each_error(detail::compare_dimension<T>(dim1, function, file, line), dim2, std::forward<Ts>(dims)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -280,11 +280,11 @@ arm_compute::Error error_on_mismatching_dimensions(const char *function, const c
* @param[in] tensor_info_2 The second tensor info to be compared.
* @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_shapes(const char *function, const char *file, const int line,
- const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
+inline arm_compute::Status error_on_mismatching_shapes(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
{
return error_on_mismatching_shapes(function, file, line, 0U, tensor_info_1, tensor_info_2, std::forward<Ts>(tensor_infos)...);
}
@@ -297,11 +297,11 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
* @param[in] tensor_2 The second tensor to be compared.
* @param[in] tensors (Optional) Further allowed tensors.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_shapes(const char *function, const char *file, const int line,
- const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
+inline arm_compute::Status error_on_mismatching_shapes(const char *function, const char *file, const int line,
+ const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
{
return error_on_mismatching_shapes(function, file, line, 0U, tensor_1, tensor_2, std::forward<Ts>(tensors)...);
}
@@ -315,11 +315,11 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
* @param[in] tensor_info_2 The second tensor info to be compared.
* @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_shapes(const char *function, const char *file, const int line,
- unsigned int upper_dim, const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
+inline arm_compute::Status error_on_mismatching_shapes(const char *function, const char *file, const int line,
+ unsigned int upper_dim, const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_info_1 == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_info_2 == nullptr, function, file, line);
@@ -331,7 +331,7 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
return detail::have_different_dimensions((*tensors_info_array.cbegin())->tensor_shape(), tensor_info->tensor_shape(), upper_dim);
}),
function, file, line, "Tensors have different shapes");
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the passed two tensors have different shapes from the given dimension
*
@@ -343,18 +343,18 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
* @param[in] tensor_2 The second tensor to be compared.
* @param[in] tensors (Optional) Further allowed tensors.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_shapes(const char *function, const char *file, const int line,
- unsigned int upper_dim, const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
+inline arm_compute::Status error_on_mismatching_shapes(const char *function, const char *file, const int line,
+ unsigned int upper_dim, const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_1 == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_2 == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_nullptr(function, file, line, std::forward<Ts>(tensors)...));
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_shapes(function, file, line, upper_dim, tensor_1->info(), tensor_2->info(),
detail::get_tensor_info_t<ITensorInfo *>()(tensors)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -369,11 +369,11 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
* @param[in] tensor_info The first tensor info to be compared.
* @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_data_types(const char *function, const char *file, const int line,
- const ITensorInfo *tensor_info, Ts... tensor_infos)
+inline arm_compute::Status error_on_mismatching_data_types(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info, Ts... tensor_infos)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_info == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_nullptr(function, file, line, std::forward<Ts>(tensor_infos)...));
@@ -385,7 +385,7 @@ inline arm_compute::Error error_on_mismatching_data_types(const char *function,
return tensor_info_obj->data_type() != tensor_data_type;
}),
function, file, line, "Tensors have different data types");
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the passed two tensors have different data types
*
@@ -395,17 +395,17 @@ inline arm_compute::Error error_on_mismatching_data_types(const char *function,
* @param[in] tensor The first tensor to be compared.
* @param[in] tensors (Optional) Further allowed tensors.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_data_types(const char *function, const char *file, const int line,
- const ITensor *tensor, Ts... tensors)
+inline arm_compute::Status error_on_mismatching_data_types(const char *function, const char *file, const int line,
+ const ITensor *tensor, Ts... tensors)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_nullptr(function, file, line, std::forward<Ts>(tensors)...));
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_data_types(function, file, line, tensor->info(),
detail::get_tensor_info_t<ITensorInfo *>()(tensors)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -423,18 +423,18 @@ inline arm_compute::Error error_on_mismatching_data_types(const char *function,
* @param[in] tensor_info_2 The second tensor info to be compared.
* @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_fixed_point(const char *function, const char *file, const int line,
- const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
+inline arm_compute::Status error_on_mismatching_fixed_point(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
{
DataType &&first_data_type = tensor_info_1->data_type();
const int first_fixed_point_position = tensor_info_1->fixed_point_position();
if(!is_data_type_fixed_point(first_data_type))
{
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
const std::array < const ITensorInfo *, 1 + sizeof...(Ts) > tensor_infos_array{ { tensor_info_2, std::forward<Ts>(tensor_infos)... } };
@@ -449,7 +449,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
}),
function, file, line, "Tensors have different fixed point positions");
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the passed tensor have different fixed point data types or different fixed point positions
*
@@ -462,15 +462,15 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
* @param[in] tensor_2 The second tensor to be compared.
* @param[in] tensors (Optional) Further allowed tensors.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_fixed_point(const char *function, const char *file, const int line,
- const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
+inline arm_compute::Status error_on_mismatching_fixed_point(const char *function, const char *file, const int line,
+ const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
{
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point(function, file, line, tensor_1->info(), tensor_2->info(),
detail::get_tensor_info_t<ITensorInfo *>()(tensors)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -488,18 +488,18 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
* @param[in] tensor_info_2 The second tensor info to be compared.
* @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_quantization_info(const char *function, const char *file, const int line,
- const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
+inline arm_compute::Status error_on_mismatching_quantization_info(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
{
DataType &&first_data_type = tensor_info_1->data_type();
const QuantizationInfo first_quantization_info = tensor_info_1->quantization_info();
if(!is_data_type_quantized_asymmetric(first_data_type))
{
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
const std::array < const ITensorInfo *, 1 + sizeof...(Ts) > tensor_infos_array{ { tensor_info_2, std::forward<Ts>(tensor_infos)... } };
@@ -514,7 +514,7 @@ inline arm_compute::Error error_on_mismatching_quantization_info(const char *fun
}),
function, file, line, "Tensors have different quantization information");
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the passed tensor have different asymmetric quantized data types or different quantization info
*
@@ -527,15 +527,15 @@ inline arm_compute::Error error_on_mismatching_quantization_info(const char *fun
* @param[in] tensor_2 The second tensor to be compared.
* @param[in] tensors (Optional) Further allowed tensors.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_quantization_info(const char *function, const char *file, const int line,
- const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
+inline arm_compute::Status error_on_mismatching_quantization_info(const char *function, const char *file, const int line,
+ const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
{
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_quantization_info(function, file, line, tensor_1->info(), tensor_2->info(),
detail::get_tensor_info_t<ITensorInfo *>()(tensors)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_quantization_info(__func__, __FILE__, __LINE__, __VA_ARGS__))
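For context, a hedged sketch (operator and tensor names are placeholders) of the configure-time counterpart, which asserts/throws through the macro above rather than returning a Status; the helper only enforces matching scale/offset when the first tensor is asymmetrically quantized, so non-quantized inputs pass straight through:

void my_quantized_op_configure(const ITensor *input, const ITensor *output)
{
    // Both tensors must carry the same QuantizationInfo when they are quantized
    ARM_COMPUTE_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
    // ... rest of the configuration
}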
@@ -582,11 +582,11 @@ void error_on_format_not_in(const char *function, const char *file, const int li
* @param[in] dt First data type allowed.
* @param[in] dts (Optional) Further allowed data types.
*
- * @return Error
+ * @return Status
*/
template <typename T, typename... Ts>
-inline arm_compute::Error error_on_data_type_not_in(const char *function, const char *file, const int line,
- const ITensorInfo *tensor_info, T &&dt, Ts &&... dts)
+inline arm_compute::Status error_on_data_type_not_in(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info, T &&dt, Ts &&... dts)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor_info == nullptr, function, file, line);
@@ -599,7 +599,7 @@ inline arm_compute::Error error_on_data_type_not_in(const char *function, const
return d == tensor_dt;
}),
function, file, line, "ITensor data type %s not supported by this kernel", string_from_data_type(tensor_dt).c_str());
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the data type of the passed tensor does not match any of the data types provided.
*
@@ -610,15 +610,15 @@ inline arm_compute::Error error_on_data_type_not_in(const char *function, const
* @param[in] dt First data type allowed.
* @param[in] dts (Optional) Further allowed data types.
*
- * @return Error
+ * @return Status
*/
template <typename T, typename... Ts>
-inline arm_compute::Error error_on_data_type_not_in(const char *function, const char *file, const int line,
- const ITensor *tensor, T &&dt, Ts &&... dts)
+inline arm_compute::Status error_on_data_type_not_in(const char *function, const char *file, const int line,
+ const ITensor *tensor, T &&dt, Ts &&... dts)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_not_in(function, file, line, tensor->info(), std::forward<T>(dt), std::forward<Ts>(dts)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
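As a usage illustration (not part of the patch; kernel and tensor names hypothetical), restricting a kernel to floating-point inputs at configure time:

void my_float_kernel_configure(const ITensor *input)
{
    // Reject anything that is not F16 or F32
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
    // ... rest of the configuration
}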
@@ -635,16 +635,16 @@ inline arm_compute::Error error_on_data_type_not_in(const char *function, const
* @param[in] dt First data type allowed.
* @param[in] dts (Optional) Further allowed data types.
*
- * @return Error
+ * @return Status
*/
template <typename T, typename... Ts>
-inline arm_compute::Error error_on_data_type_channel_not_in(const char *function, const char *file, const int line,
- const ITensorInfo *tensor_info, size_t num_channels, T &&dt, Ts &&... dts)
+inline arm_compute::Status error_on_data_type_channel_not_in(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info, size_t num_channels, T &&dt, Ts &&... dts)
{
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_not_in(function, file, line, tensor_info, std::forward<T>(dt), std::forward<Ts>(dts)...));
const size_t tensor_nc = tensor_info->num_channels();
ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(tensor_nc != num_channels, function, file, line, "Number of channels %d. Required number of channels %d", tensor_nc, num_channels);
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the data type or the number of channels of the passed tensor does not match any of the data types and number of channels provided.
*
@@ -656,15 +656,15 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
* @param[in] dt First data type allowed.
* @param[in] dts (Optional) Further allowed data types.
*
- * @return Error
+ * @return Status
*/
template <typename T, typename... Ts>
-inline arm_compute::Error error_on_data_type_channel_not_in(const char *function, const char *file, const int line,
- const ITensor *tensor, size_t num_channels, T &&dt, Ts &&... dts)
+inline arm_compute::Status error_on_data_type_channel_not_in(const char *function, const char *file, const int line,
+ const ITensor *tensor, size_t num_channels, T &&dt, Ts &&... dts)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ON_ERROR(error_on_data_type_channel_not_in(function, file, line, tensor->info(), num_channels, std::forward<T>(dt), std::forward<Ts>(dts)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c, ...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
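A hedged sketch of the channel-aware variant (names hypothetical): the same data-type check, but additionally pinning the number of channels, e.g. a single-channel F16/F32 input:

void my_single_plane_kernel_configure(const ITensor *input)
{
    // Input must be single-channel and either F16 or F32
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
}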
@@ -678,10 +678,10 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
* @param[in] line Line on which the error occurred.
* @param[in] tensor Tensor to validate.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_tensor_not_2d(const char *function, const char *file, const int line,
- const ITensor *tensor);
+arm_compute::Status error_on_tensor_not_2d(const char *function, const char *file, const int line,
+ const ITensor *tensor);
#define ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(t) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
#define ARM_COMPUTE_RETURN_ERROR_ON_TENSOR_NOT_2D(t) \
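Both macro flavours wrap the same helper; a sketch (hypothetical names) of how a matrix-style kernel would use them, throwing in configure() and propagating a Status in validate():

void my_matrix_kernel_configure(const ITensor *matrix)
{
    ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(matrix); // assert/throw on misuse
}

arm_compute::Status my_matrix_kernel_validate(const ITensor *matrix)
{
    ARM_COMPUTE_RETURN_ERROR_ON_TENSOR_NOT_2D(matrix); // early-return the error
    return arm_compute::Status{};
}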
@@ -696,11 +696,11 @@ arm_compute::Error error_on_tensor_not_2d(const char *function, const char *file
* @param[in] channel First channel allowed.
* @param[in] channels (Optional) Further allowed channels.
*
- * @return Error
+ * @return Status
*/
template <typename T, typename... Ts>
-inline arm_compute::Error error_on_channel_not_in(const char *function, const char *file, const int line,
- T cn, T &&channel, Ts &&... channels)
+inline arm_compute::Status error_on_channel_not_in(const char *function, const char *file, const int line,
+ T cn, T &&channel, Ts &&... channels)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(cn == Channel::UNKNOWN, function, file, line);
@@ -710,7 +710,7 @@ inline arm_compute::Error error_on_channel_not_in(const char *function, const ch
return f == cn;
}),
function, file, line);
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN(c, ...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
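A short sketch (the channel variable is hypothetical) limiting a channel-extraction style operation to the R, G and B planes:

const Channel requested_channel = Channel::R;
ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN(requested_channel, Channel::R, Channel::G, Channel::B);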
@@ -725,10 +725,10 @@ inline arm_compute::Error error_on_channel_not_in(const char *function, const ch
 * @param[in] fmt Input format
 * @param[in] cn Channel to validate.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_channel_not_in_known_format(const char *function, const char *file, const int line,
- Format fmt, Channel cn);
+arm_compute::Status error_on_channel_not_in_known_format(const char *function, const char *file, const int line,
+ Format fmt, Channel cn);
#define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
#define ARM_COMPUTE_RETURN_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
@@ -747,10 +747,10 @@ arm_compute::Error error_on_channel_not_in_known_format(const char *function, co
* @param[in] line Line on which the error occurred.
* @param[in] multi_hog IMultiHOG container to validate
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *file, const int line,
- const IMultiHOG *multi_hog);
+arm_compute::Status error_on_invalid_multi_hog(const char *function, const char *file, const int line,
+ const IMultiHOG *multi_hog);
#define ARM_COMPUTE_ERROR_ON_INVALID_MULTI_HOG(m) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_MULTI_HOG(m) \
@@ -763,8 +763,8 @@ arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *
* @param[in] line Line on which the error occurred.
* @param[in] kernel Kernel to validate.
*/
-arm_compute::Error error_on_unconfigured_kernel(const char *function, const char *file, const int line,
- const IKernel *kernel);
+arm_compute::Status error_on_unconfigured_kernel(const char *function, const char *file, const int line,
+ const IKernel *kernel);
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
#define ARM_COMPUTE_RETURN_ERROR_ON_UNCONFIGURED_KERNEL(k) \
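A hedged sketch (function and parameter names hypothetical) of the typical guard on a run path, ensuring configure() was called before the kernel is dispatched:

void run_my_function(const IKernel *kernel)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(kernel); // refuse to run before configure()
    // ... dispatch the kernel
}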
@@ -779,10 +779,10 @@ arm_compute::Error error_on_unconfigured_kernel(const char *function, const char
* @param[in] coords Coordinates inside the parent tensor where the first element of the subtensor is
* @param[in] shape Shape of the subtensor
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_invalid_subtensor(const char *function, const char *file, const int line,
- const TensorShape &parent_shape, const Coordinates &coords, const TensorShape &shape);
+arm_compute::Status error_on_invalid_subtensor(const char *function, const char *file, const int line,
+ const TensorShape &parent_shape, const Coordinates &coords, const TensorShape &shape);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
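A sketch with illustrative values: checking that a 32x32 sub-tensor starting at (16, 16) fits inside a 64x64 parent before creating it:

const TensorShape parent_shape(64U, 64U);
const Coordinates start(16, 16);
const TensorShape sub_shape(32U, 32U);
ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR(parent_shape, start, sub_shape);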
@@ -796,10 +796,10 @@ arm_compute::Error error_on_invalid_subtensor(const char *function, const char *
* @param[in] parent_valid_region Parent valid region.
* @param[in] valid_region Valid region of subtensor.
*
- * @return Error
+ * @return Status
*/
-arm_compute::Error error_on_invalid_subtensor_valid_region(const char *function, const char *file, const int line,
- const ValidRegion &parent_valid_region, const ValidRegion &valid_region);
+arm_compute::Status error_on_invalid_subtensor_valid_region(const char *function, const char *file, const int line,
+ const ValidRegion &parent_valid_region, const ValidRegion &valid_region);
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
#define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
@@ -814,11 +814,11 @@ arm_compute::Error error_on_invalid_subtensor_valid_region(const char *function,
* @param[in] tensor_info_2 The second tensor info to be compared.
* @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *function, const char *file, const int line,
- const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
+inline arm_compute::Status error_on_mismatching_fixed_point_position(const char *function, const char *file, const int line,
+ const ITensorInfo *tensor_info_1, const ITensorInfo *tensor_info_2, Ts... tensor_infos)
{
const std::array < const ITensorInfo *, 1 + sizeof...(Ts) > tensor_info_array{ { tensor_info_2, std::forward<Ts>(tensor_infos)... } };
ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(std::any_of(tensor_info_array.begin(), tensor_info_array.end(), [&](const ITensorInfo * tensor_info)
@@ -826,7 +826,7 @@ inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *
return tensor_info->fixed_point_position() != tensor_info_1->fixed_point_position();
}),
function, file, line, "Tensors have different fixed-point positions");
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the input fixed-point positions are different.
*
@@ -837,15 +837,15 @@ inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *
* @param[in] tensor_2 The second tensor to be compared.
* @param[in] tensors (Optional) Further allowed tensors.
*
- * @return Error
+ * @return Status
*/
template <typename... Ts>
-inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *function, const char *file, const int line,
- const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
+inline arm_compute::Status error_on_mismatching_fixed_point_position(const char *function, const char *file, const int line,
+ const ITensor *tensor_1, const ITensor *tensor_2, Ts... tensors)
{
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point_position(function, file, line, tensor_1->info(), tensor_2->info(),
detail::get_tensor_info_t<ITensorInfo *>()(tensors)...));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
@@ -860,10 +860,10 @@ inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *
* @param[in] value The floating point value to be checked.
* @param[in] tensor_info Input tensor info that has information on data type and fixed-point position.
*
- * @return Error
+ * @return Status
*/
-inline arm_compute::Error error_on_value_not_representable_in_fixed_point(const char *function, const char *file, int line,
- float value, const ITensorInfo *tensor_info)
+inline arm_compute::Status error_on_value_not_representable_in_fixed_point(const char *function, const char *file, int line,
+ float value, const ITensorInfo *tensor_info)
{
const int fixed_point_position = tensor_info->fixed_point_position();
const DataType dt = tensor_info->data_type();
@@ -872,7 +872,7 @@ inline arm_compute::Error error_on_value_not_representable_in_fixed_point(const
ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(value > max_range, function, file, line,
"Value %f is not representable in %s with fixed-point position %d", value, string_from_data_type(dt).c_str(), fixed_point_position);
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
/** Return an error if the value is not representable in the specified Q format.
*
@@ -882,14 +882,14 @@ inline arm_compute::Error error_on_value_not_representable_in_fixed_point(const
* @param[in] value The floating point value to be checked.
* @param[in] tensor Input tensor that has information on data type and fixed-point position.
*
- * @return Error
+ * @return Status
*/
-inline arm_compute::Error error_on_value_not_representable_in_fixed_point(const char *function, const char *file, int line,
- float value, const ITensor *tensor)
+inline arm_compute::Status error_on_value_not_representable_in_fixed_point(const char *function, const char *file, int line,
+ float value, const ITensor *tensor)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_value_not_representable_in_fixed_point(function, file, line, value, tensor->info()));
- return arm_compute::Error{};
+ return arm_compute::Status{};
}
#define ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(...) \
ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
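A hedged sketch (variable names hypothetical): before quantizing a floating-point coefficient into a fixed-point tensor's Q format, check that it fits in that representation:

const float beta = 0.5f;
ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(beta, input); // input: hypothetical fixed-point ITensor*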
diff --git a/arm_compute/core/utils/quantization/AsymmHelpers.h b/arm_compute/core/utils/quantization/AsymmHelpers.h
index e7a90fa216..6fd1d80010 100644
--- a/arm_compute/core/utils/quantization/AsymmHelpers.h
+++ b/arm_compute/core/utils/quantization/AsymmHelpers.h
@@ -35,15 +35,19 @@ namespace quantization
* @param[in] multiplier Real multiplier.
* @param[out] quant_multiplier Integer multiplier.
* @param[out] right_shift Right bit shift.
+ *
+ * @return a status
*/
-arm_compute::Error calculate_quantized_multiplier_less_than_one(double multiplier, int *quant_multiplier, int *right_shift);
+arm_compute::Status calculate_quantized_multiplier_less_than_one(double multiplier, int *quant_multiplier, int *right_shift);
/** Calculate quantized representation of multiplier having value greater than one.
*
* @param[in] multiplier Real multiplier.
* @param[out] quantized_multiplier Integer multiplier.
* @param[out] left_shift Left bit shift.
+ *
+ * @return a status
*/
-arm_compute::Error calculate_quantized_multiplier_greater_than_one(double multiplier, int *quantized_multiplier, int *left_shift);
+arm_compute::Status calculate_quantized_multiplier_greater_than_one(double multiplier, int *quantized_multiplier, int *left_shift);
} // namespace quantization
} // namespace arm_compute
#endif /* __ARM_COMPUTE_IO_FILE_HANDLER_H__ */
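To close, a sketch of the new call-site pattern for these helpers (the multiplier value is illustrative): the real multiplier of a quantized output stage is decomposed into an integer multiplier plus a right shift, and the returned Status is checked with the existing macro:

const double real_multiplier = 0.2345;
int quant_multiplier = 0;
int right_shift = 0;
const arm_compute::Status status =
    arm_compute::quantization::calculate_quantized_multiplier_less_than_one(real_multiplier, &quant_multiplier, &right_shift);
ARM_COMPUTE_ERROR_THROW_ON(status); // asserts/throws if the decomposition failed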