aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/core
diff options
context:
space:
mode:
authorMichalis Spyrou <michalis.spyrou@arm.com>2019-08-22 11:44:04 +0100
committerMichalis Spyrou <michalis.spyrou@arm.com>2019-08-23 13:13:08 +0000
commit29a01c90fc372d31188ab7157b45b32ce24fa9b3 (patch)
tree419b7abc22c56fde8dece4c80c328a209c041d94 /arm_compute/core
parentfb0fdcdaec57e6f8e1b96f924411921cc0ba6d94 (diff)
downloadComputeLibrary-29a01c90fc372d31188ab7157b45b32ce24fa9b3.tar.gz
COMPMID-2417: NEDequantizationLayer support for QASYMM8_PER_CHANNEL
Change-Id: I1ef4ce8610e11e81702b0b7f0f7c437fed49833e
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1795
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core')
-rw-r--r-- arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h | 4
-rw-r--r-- arm_compute/core/QuantizationInfo.h | 11
2 files changed, 13 insertions, 2 deletions
diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
index f0a2a57d1a..3e7feda650 100644
--- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
@@ -52,13 +52,13 @@ public:
~NEDequantizationLayerKernel() = default;
/** Set input, output tensors.
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8/QSYMM16.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
index 79afca0714..1517d48381 100644
--- a/arm_compute/core/QuantizationInfo.h
+++ b/arm_compute/core/QuantizationInfo.h
@@ -103,6 +103,17 @@ public:
: _scale(scale), _offset()
{
}
+ /** Construct quantization info.
+ *
+ * @note Used for asymmetric per channel quantization
+ *
+ * @param[in] scale Scale.
+ * @param[in] offset Offset.
+ */
+ QuantizationInfo(std::vector<float> scale, std::vector<int32_t> offset)
+ : _scale(scale), _offset(offset)
+ {
+ }
/** Scale vector accessor
*
* @return A reference to quantization scale metadata