diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2019-05-21 13:32:43 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2019-06-03 14:51:29 +0000 |
commit | 4c5469b192665c94118a8a558787cb9cec2d0765 (patch) | |
tree | 168aa969de8243bdbb1f25247dd9f54d037ae32c /arm_compute/core/NEON | |
parent | 43a129e94df41f9ac8bc78b702da5a387ada0494 (diff) | |
download | ComputeLibrary-4c5469b192665c94118a8a558787cb9cec2d0765.tar.gz |
COMPMID-2225: Add interface support for new quantized data types.
Add support for:
- QSYMM8: 8-bit quantized symmetric
- QSYMM8_PER_CHANNEL: 8-bit quantized symmetric with per-channel quantization
Change-Id: I00c4ff98e44af37419470af61419ee95d0de2463
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1236
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core/NEON')
-rw-r--r-- | arm_compute/core/NEON/NEAsymm.h | 8 | ||||
-rw-r--r-- | arm_compute/core/NEON/kernels/NEActivationLayerKernel.h | 1 | ||||
-rw-r--r-- | arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h | 2 |
3 files changed, 5 insertions, 6 deletions
diff --git a/arm_compute/core/NEON/NEAsymm.h b/arm_compute/core/NEON/NEAsymm.h index 253d0fdff7..2347c468ab 100644 --- a/arm_compute/core/NEON/NEAsymm.h +++ b/arm_compute/core/NEON/NEAsymm.h @@ -182,7 +182,7 @@ inline uint8_t finalize_quantization(int32_t in_value, int result_fixedpoint_mul * * @return Dequantized values in a neon vector */ -inline float32x4x2_t vdequantize(const uint8x8_t &qv, const QuantizationInfo &qi) +inline float32x4x2_t vdequantize(const uint8x8_t &qv, const UniformQuantizationInfo &qi) { const float scale = qi.scale; const int offset = qi.offset; @@ -205,7 +205,7 @@ inline float32x4x2_t vdequantize(const uint8x8_t &qv, const QuantizationInfo &qi * * @return Dequantized values in a neon vector */ -inline float32x4x4_t vdequantize(const uint8x16_t &qv, const QuantizationInfo &qi) +inline float32x4x4_t vdequantize(const uint8x16_t &qv, const UniformQuantizationInfo &qi) { const float scale = qi.scale; const int offset = qi.offset; @@ -230,7 +230,7 @@ inline float32x4x4_t vdequantize(const uint8x16_t &qv, const QuantizationInfo &q * * @return A neon vector holding the quantized values */ -inline uint8x8_t vquantize(const float32x4x2_t &qv, const QuantizationInfo &qi) +inline uint8x8_t vquantize(const float32x4x2_t &qv, const UniformQuantizationInfo &qi) { const float scale = qi.scale; const int offset = qi.offset; @@ -258,7 +258,7 @@ inline uint8x8_t vquantize(const float32x4x2_t &qv, const QuantizationInfo &qi) * * @return A neon vector holding the quantized values */ -inline uint8x16_t vquantize(const float32x4x4_t &qv, const QuantizationInfo &qi) +inline uint8x16_t vquantize(const float32x4x4_t &qv, const UniformQuantizationInfo &qi) { const float scale = qi.scale; const int offset = qi.offset; diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h index 447f4880ee..9381beaded 100644 --- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h +++ 
b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h @@ -25,7 +25,6 @@ #define __ARM_COMPUTE_NEACTIVATIONLAYERKERNEL_H__ #include "arm_compute/core/NEON/INEKernel.h" -#include "arm_compute/core/QAsymm8.h" #include "arm_compute/core/utils/misc/Traits.h" #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h index cbb961f235..daa29fdf4f 100644 --- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h +++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h @@ -115,7 +115,7 @@ private: * */ using MulFunctionQASYMM8 = void(const void *__restrict input1_ptr, const void *__restrict input2_ptr, void *__restrict output_ptr, float scale, - const QuantizationInfo &input1_qua_info, const QuantizationInfo &input2_qua_info, const QuantizationInfo &output_qua_info); + const UniformQuantizationInfo &input1_qua_info, const UniformQuantizationInfo &input2_qua_info, const UniformQuantizationInfo &output_qua_info); MulFunctionFloat *_func_float; MulFunctionInt *_func_int; |