diff options
author | Michele Di Giorgio <michele.digiorgio@arm.com> | 2021-03-09 14:09:08 +0000 |
---|---|---|
committer | Michele Di Giorgio <michele.digiorgio@arm.com> | 2021-03-31 17:08:51 +0000 |
commit | 33f41fabd30fb444aaa0cf3e65b61794d498d151 (patch) | |
tree | a381cff3096a3b05198b0cd311fee28e40fd5a4f /arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h | |
parent | 5f91b5d7063462854b62d342f9d4e04ae647e9a6 (diff) | |
download | ComputeLibrary-33f41fabd30fb444aaa0cf3e65b61794d498d151.tar.gz |
Fix trademarks throughout the codebase
Resolves: COMPMID-4299
Change-Id: Ie6a52c1371b9a2a7b5bb4f019ecd5e70a2008567
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5338
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h')
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h index 79b427ea6f..c22ed1b5c4 100644 --- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h +++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h @@ -27,7 +27,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" -/** This file contains all available output stages for GEMMLowp on Neon. +/** This file contains all available output stages for GEMMLowp. * * In gemmlowp, the "output stage" is the process that takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyCore), * and processes it to obtain the final ASYMM8 value. @@ -40,7 +40,7 @@ namespace arm_compute class ITensor; class ITensorInfo; -/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint on Neon. +/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint. * * NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint depends on 3 parameters: * @@ -61,7 +61,7 @@ class ITensorInfo; * * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift * - * This function calls the following Neon kernels: + * This function calls the following kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel * @@ -112,7 +112,7 @@ public: */ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); }; -/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint on Neon. +/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint. 
* * NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint depends on 3 parameters: * @@ -133,7 +133,7 @@ public: * * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift * - * This function calls the following Neon kernels: + * This function calls the following kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel * @@ -184,7 +184,7 @@ public: */ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); }; -/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint on Neon. +/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint. * * NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint depends on 2 parameters: * @@ -205,7 +205,7 @@ public: * * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift * - * This function calls the following Neon kernels: + * This function calls the following kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel * @@ -256,9 +256,9 @@ public: static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); }; -/** Basic function to execute GEMMLowpQuantizeDown kernels on Neon. +/** Basic function to execute GEMMLowpQuantizeDown kernels. * - * This function calls the following Neon kernels: + * This function calls the following kernels: * * -# @ref NEGEMMLowpQuantizeDownInt32ScaleKernel * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel |