path: root/arm_compute/core
author		Michele Di Giorgio <michele.digiorgio@arm.com>	2019-07-17 15:59:32 +0100
committer	Michele Di Giorgio <michele.digiorgio@arm.com>	2019-07-17 16:22:42 +0000
commit		ff2719299ea76a95f20a35a7900875a8152e293a (patch)
tree		e7183168ea86d92e1a244cea78cac8a375e97ad3 /arm_compute/core
parent		ca1f460ec33e84b9df84e29de3c3b733e6042b9c (diff)
download	ComputeLibrary-ff2719299ea76a95f20a35a7900875a8152e293a.tar.gz
COMPMID-2336: Fix validation for quantized NEDepthwiseConvolutionLayer
Assertions from calculate_quantized_multiplier_less_than_one were not captured, resulting in incorrect computation being performed on invalid inputs. This patch also fixes a discrepancy between the interfaces of the validate and configure methods of NEDirectConvolutionLayerOutputStageKernel.

Change-Id: I011822f63b4062b5b3346ef047adc684e0bffa3d
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1552
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
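The pattern the fix enforces is worth spelling out: calculate_quantized_multiplier_less_than_one returns a Status, and discarding it means an out-of-range multiplier is silently accepted. Below is a minimal sketch of capturing and propagating that Status with the library's ARM_COMPUTE_RETURN_ON_ERROR macro; the wrapper function name is hypothetical and not part of the patch.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/utils/quantization/AsymmHelpers.h"

    using namespace arm_compute;

    // Hypothetical helper illustrating the fix: propagate the Status returned
    // by calculate_quantized_multiplier_less_than_one instead of ignoring it.
    Status compute_output_stage_params(float multiplier, int *quant_multiplier, int *shift)
    {
        // Before the patch the returned Status was not captured, so an invalid
        // multiplier led to computation on bogus fixed-point parameters.
        ARM_COMPUTE_RETURN_ON_ERROR(
            quantization::calculate_quantized_multiplier_less_than_one(multiplier, quant_multiplier, shift));
        return Status{};
    }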
Diffstat (limited to 'arm_compute/core')
-rw-r--r--	arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h | 25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
index 9af3de5ffe..6c9002a3c5 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,22 +59,27 @@ public:
* @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
* @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
* Data type supported: F16/F32
- * @param[in] result_fixedpoint_multiplier (Optional)Fixed point value to be multiplied to each element of the input matrix when once the result_offset has been add
- * @param[in] result_shift (Optional)Integer value used to round to nearest division by a power-of-two the result after the fixed point multiplication
- * @param[in] result_offset_after_shift (Optional)Offset to be applied to result before converting it back to QASYMM8
+ * @param[in] result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift (Optional) Integer value used to round the result of the fixed point multiplication to nearest division by a power-of-two
+ * @param[in] result_offset_after_shift (Optional) Offset to be applied to result before converting it back to QASYMM8
*/
void configure(ITensor *input, const ITensor *bias = nullptr, ITensor *output = nullptr,
int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
/** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerOutputStageKernel
*
- * @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: F16/F32
- * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
- * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
- * Data type supported: F16/F32
+ * @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+ * Data type supported: F16/F32
+ * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
+ * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
+ * Data type supported: F16/F32
+ * @param[in] result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift (Optional) Integer value used to round the result of the fixed point multiplication to nearest division by a power-of-two
+ * @param[in] result_offset_after_shift (Optional) Offset to be applied to result before converting it back to QASYMM8
+ *
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr,
+ int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
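With the two interfaces aligned, callers can check the full quantized configuration before configuring the kernel. A usage sketch follows; the tensor shapes, data types, and parameter values are illustrative assumptions, not taken from the patch.

    #include <iostream>
    #include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    int main()
    {
        // Illustrative shapes/types (assumptions): S32 accumulators requantized to QASYMM8.
        const TensorInfo input(TensorShape(32U, 32U, 16U), 1, DataType::S32);
        const TensorInfo bias(TensorShape(16U), 1, DataType::S32);
        const TensorInfo output(TensorShape(32U, 32U, 16U), 1, DataType::QASYMM8);

        // validate() now accepts the same output-stage parameters as configure(),
        // so an invalid quantized setup is rejected before any kernel is configured.
        const Status status = NEDirectConvolutionLayerOutputStageKernel::validate(
            &input, &bias, &output,
            1073741824 /* result_fixedpoint_multiplier, example value */,
            8          /* result_shift */,
            128        /* result_offset_after_shift */);

        if(status.error_code() != ErrorCode::OK)
        {
            std::cerr << "Invalid configuration: " << status.error_description() << std::endl;
            return 1;
        }
        return 0;
    }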