diff options
author | Sheri Zhang <sheri.zhang@arm.com> | 2021-02-10 15:32:38 +0000 |
---|---|---|
committer | Michele Di Giorgio <michele.digiorgio@arm.com> | 2021-02-22 11:12:23 +0000 |
commit | ac6499a474503dfcf587d95b85823aeb5861103f (patch) | |
tree | fc0bdc870d564ef45468a5033f954b10505ec956 /arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h | |
parent | 669d17d9055089be1a20f6984630b95019fe2e78 (diff) | |
download | ComputeLibrary-ac6499a474503dfcf587d95b85823aeb5861103f.tar.gz |
Comply with Trademark rules for use of Neon, Arm and Mali
Full trademarks available in README.md
Resolves: COMPMID-4257
Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: Ibfba2adf2eef3449433f467464ebd87d7198474d
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5116
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h')
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h index 6977d27cb6..79b427ea6f 100644 --- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h +++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,7 +27,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" -/** This file contains all available output stages for GEMMLowp on NEON. +/** This file contains all available output stages for GEMMLowp on Neon. * * In gemmlowp, the "output stage" is the process that takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyCore), * and processes it to obtain the final ASYMM8 value. @@ -40,7 +40,7 @@ namespace arm_compute class ITensor; class ITensorInfo; -/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint on NEON. +/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint on Neon. * * NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint depends on 3 parameters: * @@ -61,7 +61,7 @@ class ITensorInfo; * * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift * - * This function calls the following NEON kernels: + * This function calls the following Neon kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel * @@ -112,7 +112,7 @@ public: */ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); }; -/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint on NEON. +/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint on Neon. 
* * NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint depends on 3 parameters: * @@ -133,7 +133,7 @@ public: * * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift * - * This function calls the following NEON kernels: + * This function calls the following Neon kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel * @@ -184,7 +184,7 @@ public: */ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); }; -/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint on NEON. +/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint on Neon. * * NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint depends on 2 parameters: * @@ -205,7 +205,7 @@ public: * * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift * - * This function calls the following NEON kernels: + * This function calls the following Neon kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel * @@ -256,9 +256,9 @@ public: static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); }; -/** Basic function to execute GEMMLowpQuantizeDown kernels on NEON. +/** Basic function to execute GEMMLowpQuantizeDown kernels on Neon. * - * This function calls the following NEON kernels: + * This function calls the following Neon kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ScaleKernel * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel |