 Android.bp                                                                    |   2
 arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h          |   4
 arm_compute/core/NEON/NEKernels.h                                             |   4
 arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h        | 112
 arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h | 120
 arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h                    |   6
 docs/00_introduction.dox                                                      |   1
 src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.cpp (renamed from src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp) | 222
 src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp                            |  34
 src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp                          | 147
 tests/validation/CL/GEMMLowp.cpp                                              |  59
 tests/validation/NEON/GEMMLowp.cpp                                            | 125
 tests/validation/fixtures/GEMMLowpFixture.h                                   | 114
13 files changed, 557 insertions, 393 deletions
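
In summary: this patch generalises the NEON QUANTIZE_DOWN output stage from a QASYMM8-only kernel (NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel) to a type-templated NEGEMMLowpQuantizeDownInt32ScaleKernel that also handles QASYMM8_SIGNED, and it replaces the loose (result_offset, result_mult_int, result_shift, min, max) arguments with a GEMMLowpOutputStageInfo descriptor. A minimal usage sketch of the new calling convention follows; it mirrors the test fixture further down in this diff, and the tensor shapes and stage parameters are illustrative only:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // S32 GEMMLowp accumulators in, quantized 8-bit values out.
        Tensor src, bias, dst;
        src.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::S32));
        bias.allocator()->init(TensorInfo(TensorShape(21U), 1, DataType::S32));
        dst.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED));

        GEMMLowpOutputStageInfo info = GEMMLowpOutputStageInfo();
        info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN; // integer-scale path
        info.gemmlowp_offset     = -2;   // added to each accumulator (and to the bias, if any)
        info.gemmlowp_multiplier = 1;    // integer multiplier applied after the offset
        info.gemmlowp_shift      = 2;    // arithmetic right shift of the product
        info.gemmlowp_min_bound  = -128; // full type range, i.e. no bounded ReLU
        info.gemmlowp_max_bound  = 127;
        info.output_data_type    = DataType::QASYMM8_SIGNED; // or DataType::QASYMM8

        NEGEMMLowpOutputStage output_stage;
        output_stage.configure(&src, &bias, &dst, info); // bias may be nullptr
        // Note: the new NEON scale kernel stores a pointer to the stage info (the
        // _output_stage member in the header below), so 'info' must stay alive
        // until run() has executed.

        src.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src and bias with the GEMMLowp accumulators / biases ...
        output_stage.run();
        return 0;
    }
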
diff --git a/Android.bp b/Android.bp index 0d5c9e949d..0cb0b7770e 100644 --- a/Android.bp +++ b/Android.bp @@ -281,10 +281,10 @@ cc_library_static { "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp", "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp", "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp", + "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.cpp", "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp", "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp", "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp", - "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp", "src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp", "src/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.cpp", "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp", diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h index f9599b5a0e..3378359d29 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h @@ -64,7 +64,7 @@ public: * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED - * @param[in] output_stage Output stage info. Used to pass the quantized output data type + * @param[in] output_stage GEMMLowp output stage metadata. */ void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleKernel @@ -73,7 +73,7 @@ public: * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED - * @param[in] output_stage Output stage info. Used to pass the quantized output data type + * @param[in] output_stage GEMMLowp output stage metadata. * * @return a status */ diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h index 5daad34468..d9f8f00c0b 100644 --- a/arm_compute/core/NEON/NEKernels.h +++ b/arm_compute/core/NEON/NEKernels.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2019 ARM Limited. + * Copyright (c) 2016-2020 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -79,10 +79,10 @@ #include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" +#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h" -#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h" diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h new file mode 100644 index 0000000000..b4a1419c9b --- /dev/null +++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2020 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H +#define ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H + +#include "arm_compute/core/NEON/INEKernel.h" + +namespace arm_compute +{ +class ITensor; + +/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED + * + * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value. + * The following computations will be performed by the kernel: + * + * -# Add offset terms to final result + * -# Multiply each entry of result by result_mult_int + * -# Add bias to final result if bias tensor is not a nullptr + * -# Shift the int32 accumulator by result_shift + * -# Clamp the value between the specified min and max bounds + * -# Clamp the resulting int32 values: + * -# -to the [0..255] range and cast to QASYMM8. + * -# -to the [-128..127] range and cast to QASYMM8_SIGNED. 
+ * + */ +class NEGEMMLowpQuantizeDownInt32ScaleKernel : public INEKernel +{ +public: + const char *name() const override + { + return "NEGEMMLowpQuantizeDownInt32ScaleKernel"; + } + /** Constructor */ + NEGEMMLowpQuantizeDownInt32ScaleKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers)*/ + NEGEMMLowpQuantizeDownInt32ScaleKernel(const NEGEMMLowpQuantizeDownInt32ScaleKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers)*/ + NEGEMMLowpQuantizeDownInt32ScaleKernel &operator=(const NEGEMMLowpQuantizeDownInt32ScaleKernel &) = delete; + /** Allow instances of this class to be moved */ + NEGEMMLowpQuantizeDownInt32ScaleKernel(NEGEMMLowpQuantizeDownInt32ScaleKernel &&) = default; + /** Allow instances of this class to be moved */ + NEGEMMLowpQuantizeDownInt32ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ScaleKernel &&) = default; + /** Initialise the kernel's input and output. + * + * @param[in] input Input tensor. Data type supported: S32 + * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. + * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. + * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED + * @param[out] output_stage GEMMLowp output stage metadata. + */ + void configure(const ITensor *input, const ITensor *bias, ITensor *output, const GEMMLowpOutputStageInfo *output_stage); + /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ScaleKernel + * + * @param[in] input Input tensor. Data type supported: S32 + * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. + * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. + * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED + * @param[out] output_stage GEMMLowp output stage metadata. + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage); + + // Inherited methods overridden: + void run(const Window &window, const ThreadInfo &info) override; + +private: + /** Template function to run the NEGEMMLowpQuantizeDownInt32ScaleKernel + * + * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). + */ + template <typename T> + void run(const Window &window); + + /** Common signature for all the specialised NEGEMMLowpQuantizeDownInt32ScaleKernel functions + * + * @param[in] window Region on which to execute the kernel. 
+ */ + using QuantizeDownFunctionPtr = void (NEGEMMLowpQuantizeDownInt32ScaleKernel::*)(const Window &window); + + QuantizeDownFunctionPtr _func; + const ITensor *_input; + const ITensor *_bias; + ITensor *_output; + const GEMMLowpOutputStageInfo *_output_stage; + bool _is_bounded_relu; +}; +} // namespace arm_compute + +#endif /* ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H */ diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h deleted file mode 100644 index 14cc383014..0000000000 --- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEKERNEL_H -#define ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEKERNEL_H - -#include "arm_compute/core/NEON/INEKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8 - * - * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value. - * The following computations will be performed by the kernel: - * - * -# Add offset terms to final result - * -# Multiply each entry of result by result_mult_int - * -# Add bias to final result if bias tensor is not a nullptr - * -# Shift the int32 accumulator by result_shift - * -# Clamp the value between the specified min and max bounds - * -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8. 
- * - */ -class NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel : public INEKernel -{ -public: - const char *name() const override - { - return "NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel"; - } - /** Constructor */ - NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers)*/ - NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel(const NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers)*/ - NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(const NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &) = delete; - /** Allow instances of this class to be moved */ - NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default; - /** Allow instances of this class to be moved */ - NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default; - /** Initialise the kernel's input and output. - * - * @param[in] input Input tensor. Data type supported: S32 - * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. - * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. - * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8 - * @param[in] result_offset Offset to be added to each element of the input matrix - * @param[in] result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add - * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8 - * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8 - * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, - * Along with @p min, this value can be used to implement "rectified linear unit" activation functions - */ - void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0); - /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel - * - * @param[in] input Input tensor. Data type supported: S32 - * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. - * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. - * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8 - * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8 - * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, - * Along with @p min, this value can be used to implement "rectified linear unit" activation functions - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - -private: - /** Template function to run the NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel - * - * @param[in] window Region on which to execute the kernel. 
(Must be a valid region of the window returned by window()). - */ - template <bool is_bounded_relu> - void run(const Window &window); - - /** Common signature for all the specialised NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel functions - * - * @param[in] window Region on which to execute the kernel. - */ - using QuantizeDownFunctionPtr = void (NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::*)(const Window &window); - - QuantizeDownFunctionPtr _func; - const ITensor *_input; - const ITensor *_bias; - ITensor *_output; - int _result_offset; - int _result_mult_int; - int _result_shift; - int _min; - int _max; -}; -} // namespace arm_compute - -#endif /* ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEKERNEL_H */ diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h index 283b052917..cbdc788c0a 100644 --- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h +++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h @@ -51,7 +51,7 @@ class ITensor; * * This function calls the following NEON kernels: * - * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel + * -# @ref NEGEMMLowpQuantizeDownInt32ScaleKernel * * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions * after the result is shifted right by result_shift @@ -72,6 +72,7 @@ public: * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer. */ + ARM_COMPUTE_DEPRECATED_REL(20.05) void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8Scale @@ -86,6 +87,7 @@ public: * * @return a status */ + ARM_COMPUTE_DEPRECATED_REL(20.05) static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()); }; @@ -273,7 +275,7 @@ public: * * This function calls the following NEON kernels: * - * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel + * -# @ref NEGEMMLowpQuantizeDownInt32ScaleKernel * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel * -# @ref NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel * -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox index d3ec24d743..67b879c37b 100644 --- a/docs/00_introduction.dox +++ b/docs/00_introduction.dox @@ -855,7 +855,6 @@ v17.12 Public major release - @ref NEDepthwiseConvolutionLayer3x3Kernel / NEDepthwiseIm2ColKernel / @ref NEGEMMMatrixVectorMultiplyKernel / NEDepthwiseVectorToTensorKernel / @ref NEDepthwiseConvolutionLayer - @ref NEGEMMLowpOffsetContributionKernel / @ref NEGEMMLowpMatrixAReductionKernel / @ref NEGEMMLowpMatrixBReductionKernel / @ref NEGEMMLowpMatrixMultiplyCore - @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel / @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint - - @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel / @ref 
NEGEMMLowpQuantizeDownInt32ToUint8Scale - NEWinogradLayer / NEWinogradLayerKernel - New OpenCL kernels / functions diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.cpp index a68e4e7efb..80ba2aff93 100644 --- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp +++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 ARM Limited. + * Copyright (c) 2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -21,29 +21,32 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h" +#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h" #include "arm_compute/core/AccessWindowStatic.h" #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/wrapper/wrapper.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include <arm_neon.h> #include <cstddef> #include <cstdint> -using namespace arm_compute; - -namespace +namespace arm_compute { -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max) +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON(min > max); + + ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))); + ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)) + || output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound); // Check biases if exist if(bias != nullptr) @@ -55,46 +58,17 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con if(output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8); + if(output->data_type() != output_stage->output_data_type && (output_stage->output_data_type == DataType::QASYMM8 || output_stage->output_data_type == DataType::QASYMM8_SIGNED)) + { + ARM_COMPUTE_RETURN_ERROR_MSG("Mismatching data types"); + } + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); } return Status{}; } -std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output) -{ - // Note: This kernel performs 16 elements per iteration. 
- // However, since we use a left-over for loop, we cannot have any read or write out of memory - // For this reason num_elems_processed_per_iteration is set to 1 - constexpr unsigned int num_elems_processed_per_iteration = 1; - - // Configure kernel window - Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); - - AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); - - bool window_changed = update_window_and_padding(win, - input_access); - - if(output->total_size() != 0) - { - AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration); - window_changed = window_changed || update_window_and_padding(win, output_result_access); - - output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); - } - - if(bias != nullptr) - { - AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1)); - window_changed = window_changed || update_window_and_padding(win, bias_access); - } - - Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; - return std::make_pair(err, win); -} - inline void scale_input(int32x4x4_t &in_s32, int32x4_t result_offset_s32, int32_t result_mult_int) { // Add the offset terms to GEMM's result @@ -110,23 +84,32 @@ inline void scale_input(int32x4x4_t &in_s32, int32x4_t result_offset_s32, int32_ in_s32.val[3] = vmulq_n_s32(in_s32.val[3], result_mult_int); } -template <bool is_bounded_relu> -inline uint8x16_t finalize_quantization(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8) +template <typename T> +inline typename std::enable_if<std::is_same<T, uint8_t>::value, + typename wrapper::traits::neon_vector<T, 16>::type>::type + convert_to_8bit(const int16x8x2_t in_s16) +{ + return wrapper::vcombine(wrapper::vqmovun(in_s16.val[0]), wrapper::vqmovun(in_s16.val[1])); +} + +template <typename T> +inline typename std::enable_if<std::is_same<T, int8_t>::value, + typename wrapper::traits::neon_vector<T, 16>::type>::type + convert_to_8bit(const int16x8x2_t in_s16) { - const static int32x4_t zero_s32 = vdupq_n_s32(0); + return wrapper::vcombine(wrapper::vqmovn(in_s16.val[0]), wrapper::vqmovn(in_s16.val[1])); +} +template <typename T> +inline typename wrapper::traits::neon_vector<T, 16>::type finalize_quantization(int32x4x4_t &in_s32, int32x4_t result_shift_s32, typename wrapper::traits::neon_vector<T, 16>::type min, + typename wrapper::traits::neon_vector<T, 16>::type max) +{ // Shift final result (negative value shift right) in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32); in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32); in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32); in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32); - // Saturate negative values - in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32); - in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32); - in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32); - in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32); - // Convert S32 to S16 const int16x8x2_t in_s16 = { @@ -136,38 +119,33 @@ inline uint8x16_t finalize_quantization(int32x4x4_t &in_s32, int32x4_t result_sh } }; - // Convert S16 to U8 - uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1])); + // Convert S16 to S8 or U8 + typename wrapper::traits::neon_vector<T, 16>::type out = convert_to_8bit<T>(in_s16); - if(is_bounded_relu) - { - out_u8 = vmaxq_u8(out_u8, 
min_u8); - out_u8 = vminq_u8(out_u8, max_u8); - } + out = wrapper::vmax(out, min); + out = wrapper::vmin(out, max); - return out_u8; + return out; } -} // namespace -namespace arm_compute -{ class Coordinates; -} // namespace arm_compute -template <bool is_bounded_relu> -void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window) +template <typename T> +void NEGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window) { - const int32x4_t result_offset_s32 = vdupq_n_s32(_result_offset); - const int32x4_t result_shift_s32 = vdupq_n_s32(-_result_shift); - const uint8x16_t min_u8 = vdupq_n_u8(static_cast<uint8_t>(_min)); - const uint8x16_t max_u8 = vdupq_n_u8(static_cast<uint8_t>(_max)); + using VectorType = typename wrapper::traits::neon_vector<T, 16>::type; - ARM_COMPUTE_UNUSED(min_u8); - ARM_COMPUTE_UNUSED(max_u8); + const int32x4_t result_offset_s32 = vdupq_n_s32(_output_stage->gemmlowp_offset); + const int32x4_t result_shift_s32 = vdupq_n_s32(-_output_stage->gemmlowp_shift); + const int window_step_x = 16; + const auto window_start_x = static_cast<int>(window.x().start()); + const auto window_end_x = static_cast<int>(window.x().end()); - const int window_step_x = 16; - const auto window_start_x = static_cast<int>(window.x().start()); - const auto window_end_x = static_cast<int>(window.x().end()); + const int clamp_min = (_is_bounded_relu) ? _output_stage->gemmlowp_min_bound : std::numeric_limits<T>::lowest(); + const int clamp_max = (_is_bounded_relu) ? _output_stage->gemmlowp_max_bound : std::numeric_limits<T>::max(); + + VectorType min = wrapper::vdup_n(static_cast<T>(clamp_min), wrapper::traits::vector_128_tag{}); + VectorType max = wrapper::vdup_n(static_cast<T>(clamp_max), wrapper::traits::vector_128_tag{}); Window win(window); win.set(Window::DimX, Window::Dimension(0, 1, 1)); @@ -215,9 +193,9 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window) in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]); // Add the offset terms to GEMM's result and multiply by result_mult_int - scale_input(in_s32, result_offset_s32, _result_mult_int); + scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier); - vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8)); + wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max)); } // Compute left-over elements @@ -227,17 +205,10 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window) int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x); // Quantize - in_value = ((in_value + bias_value + _result_offset) * _result_mult_int) >> _result_shift; + in_value = ((in_value + bias_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift; - // Finalize and store the result - if(is_bounded_relu) - { - *(out.ptr() + x) = static_cast<uint8_t>(std::max(_min, std::min(_max, in_value))); - } - else - { - *(out.ptr() + x) = static_cast<uint8_t>(std::max(0, std::min(255, in_value))); - } + // Store the result + *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max)); } }, in, bias, out); @@ -261,9 +232,9 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window) }; // Add the offset terms to GEMM's result and multiply by result_mult_int - scale_input(in_s32, result_offset_s32, _result_mult_int); + scale_input(in_s32, result_offset_s32, 
_output_stage->gemmlowp_multiplier); - vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8)); + wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max)); } // Compute left-over elements @@ -272,74 +243,74 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window) int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x); // Quantize - in_value = ((in_value + _result_offset) * _result_mult_int) >> _result_shift; + in_value = ((in_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift; - // Finalize and store the result - if(is_bounded_relu) - { - *(out.ptr() + x) = static_cast<uint8_t>(std::max(_min, std::min(_max, in_value))); - } - else - { - *(out.ptr() + x) = static_cast<uint8_t>(std::max(0, std::min(255, in_value))); - } + // Store the result + *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max)); } }, in, out); } } -NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel() - : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr), _result_offset(0), _result_mult_int(0), _result_shift(0), _min(0), _max(0) +NEGEMMLowpQuantizeDownInt32ScaleKernel::NEGEMMLowpQuantizeDownInt32ScaleKernel() + : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr), _output_stage(nullptr), _is_bounded_relu(false) { } -void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min, int max) +void NEGEMMLowpQuantizeDownInt32ScaleKernel::configure(const ITensor *input, const ITensor *bias, ITensor *output, const GEMMLowpOutputStageInfo *output_stage) { // Perform validate step - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, output_stage); // Output auto inizialitation if not yet initialized - auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(DataType::QASYMM8)); + auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_stage->output_data_type)); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), - min, - max)); - - _input = input; - _bias = bias; - _output = output; - _result_offset = result_offset; - _result_mult_int = result_mult_int; - _result_shift = result_shift; - _min = min; - _max = max; + output_stage)); + + _input = input; + _bias = bias; + _output = output; + _output_stage = output_stage; // Configure kernel window - auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info()); - ARM_COMPUTE_ERROR_THROW_ON(win_config.first); - INEKernel::configure(win_config.second); + Window win = calculate_max_window(*input->info(), Steps()); + Coordinates coord; + coord.set_num_dimensions(output->info()->num_dimensions()); + output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape())); + + INEKernel::configure(win); // Check if we need to clamp the result using min and max - const bool is_bounded_relu = !(min <= 0 && max >= 255); - _func = is_bounded_relu ? 
&NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run<true> : &NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run<false>; + _is_bounded_relu = ((_output_stage->gemmlowp_min_bound != _output_stage->gemmlowp_max_bound) + && !(_output_stage->gemmlowp_min_bound == std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)) + && _output_stage->gemmlowp_max_bound == std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)))); + if(_output_stage->output_data_type == DataType::QASYMM8) + { + _func = &NEGEMMLowpQuantizeDownInt32ScaleKernel::run<uint8_t>; + } + else if(_output_stage->output_data_type == DataType::QASYMM8_SIGNED) + { + _func = &NEGEMMLowpQuantizeDownInt32ScaleKernel::run<int8_t>; + } + else + { + ARM_COMPUTE_ERROR("Data type not supported"); + } } -Status NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max) +Status NEGEMMLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), - (bias != nullptr) ? bias->clone().get() : nullptr, - output->clone().get()) - .first); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, output_stage)); return Status{}; } -void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window, const ThreadInfo &info) +void NEGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window, const ThreadInfo &info) { ARM_COMPUTE_UNUSED(info); ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); @@ -347,3 +318,4 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window, co (this->*_func)(window); } +} // namespace arm_compute
\ No newline at end of file diff --git a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp index fbd1820098..2114d39866 100644 --- a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp +++ b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp @@ -156,22 +156,9 @@ void CLGEMMLowpOutputStage::configure(const ICLTensor *input, const ICLTensor *b } case GEMMLowpOutputStageType::QUANTIZE_DOWN: { - switch(info.output_data_type) - { - case DataType::QASYMM8: - case DataType::QASYMM8_SIGNED: - { - auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleKernel>(); - k->configure(input, bias, output, &info); - _kernel = std::move(k); - break; - } - default: - { - ARM_COMPUTE_ERROR("Unsupported output data type."); - break; - } - } + auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleKernel>(); + k->configure(input, bias, output, &info); + _kernel = std::move(k); break; } case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT: @@ -206,22 +193,9 @@ Status CLGEMMLowpOutputStage::validate(const ITensorInfo *input, const ITensorIn } } case GEMMLowpOutputStageType::QUANTIZE_DOWN: - { - switch(output->data_type()) - { - case DataType::QASYMM8: - case DataType::QASYMM8_SIGNED: - { - return CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(input, bias, output, &info); - } - default: - return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported output data type."); - } - } + return CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(input, bias, output, &info); case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT: - { return CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::validate(input, bias, output, &info); - } default: return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported GEMMLowpOutputStage type."); } diff --git a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp index 42d2ffce58..43ca7b3fbb 100644 --- a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp +++ b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp @@ -24,10 +24,10 @@ #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h" #include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h" -#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h" #include "arm_compute/core/Validate.h" #include "support/MemorySupport.h" @@ -35,14 +35,25 @@ namespace arm_compute { void NEGEMMLowpQuantizeDownInt32ToUint8Scale::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min, int max) { - auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel>(); - k->configure(input, bias, output, result_offset, result_mult_int, result_shift, min, max); + GEMMLowpOutputStageInfo info = GEMMLowpOutputStageInfo(); + info.gemmlowp_offset = result_offset; + info.gemmlowp_multiplier = result_mult_int; + info.gemmlowp_shift = result_shift; + info.gemmlowp_min_bound = min; + info.gemmlowp_max_bound = max; + + auto k = 
arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ScaleKernel>(); + k->configure(input, bias, output, &info); _kernel = std::move(k); } Status NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max) { - return NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(input, bias, output, min, max); + GEMMLowpOutputStageInfo info = GEMMLowpOutputStageInfo(); + info.gemmlowp_min_bound = min; + info.gemmlowp_max_bound = max; + + return NEGEMMLowpQuantizeDownInt32ScaleKernel::validate(input, bias, output, &info); } void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, @@ -89,53 +100,63 @@ void NEGEMMLowpOutputStage::configure(const ITensor *input, const ITensor *bias, ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpOutputStage::validate(input->info(), bias != nullptr ? bias->info() : nullptr, output->info(), info)); - if(info.type == GEMMLowpOutputStageType::QUANTIZE_DOWN) + switch(info.type) { - switch(output->info()->data_type()) + case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT: { - case DataType::QASYMM8: + switch(info.output_data_type) { - auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel>(); - k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_offset, info.gemmlowp_min_bound, info.gemmlowp_max_bound); - _kernel = std::move(k); - break; + case DataType::QASYMM8: + { + auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>(); + k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_offset, info.gemmlowp_min_bound, info.gemmlowp_max_bound); + _kernel = std::move(k); + break; + } + case DataType::QASYMM8_SIGNED: + { + auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel>(); + k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_offset, info.gemmlowp_min_bound, info.gemmlowp_max_bound); + _kernel = std::move(k); + break; + } + case DataType::QSYMM16: + { + auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>(); + k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_min_bound, info.gemmlowp_max_bound); + _kernel = std::move(k); + break; + } + default: + { + ARM_COMPUTE_ERROR("Unsupported output data type."); + break; + } } - default: - ARM_COMPUTE_ERROR("Unsupported output data type."); + break; } - } - else if(info.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT) - { - switch(output->info()->data_type()) + case GEMMLowpOutputStageType::QUANTIZE_DOWN: { - case DataType::QASYMM8: - { - auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>(); - k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_offset, info.gemmlowp_min_bound, info.gemmlowp_max_bound); - _kernel = std::move(k); - break; - } - case DataType::QASYMM8_SIGNED: - { - auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel>(); - k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_offset, 
info.gemmlowp_min_bound, info.gemmlowp_max_bound); - _kernel = std::move(k); - break; - } - case DataType::QSYMM16: + switch(info.output_data_type) { - auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>(); - k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_min_bound, info.gemmlowp_max_bound); - _kernel = std::move(k); - break; + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + { + auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ScaleKernel>(); + k->configure(input, bias, output, &info); + _kernel = std::move(k); + break; + } + default: + { + ARM_COMPUTE_ERROR("Unsupported output data type."); + break; + } } - default: - ARM_COMPUTE_ERROR("Unsupported output data type."); + break; } - } - else - { - ARM_COMPUTE_ERROR("Unsupported output stage quantization type."); + default: + ARM_COMPUTE_ERROR("Unsupported GEMMLowpOutputStage type."); } } @@ -147,29 +168,35 @@ Status NEGEMMLowpOutputStage::validate(const ITensorInfo *input, const ITensorIn ARM_COMPUTE_RETURN_ERROR_ON((info.type != GEMMLowpOutputStageType::QUANTIZE_DOWN) && (info.type != GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)); - if(info.type == GEMMLowpOutputStageType::QUANTIZE_DOWN) + switch(info.type) { - switch(output->data_type()) + case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT: { - case DataType::QASYMM8: - return NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(input, bias, output, info.gemmlowp_min_bound, info.gemmlowp_max_bound); - default: - return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported output data type."); + switch(output->data_type()) + { + case DataType::QASYMM8: + return NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, info.gemmlowp_min_bound, info.gemmlowp_max_bound); + case DataType::QASYMM8_SIGNED: + return NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::validate(input, bias, output, info.gemmlowp_min_bound, info.gemmlowp_max_bound); + case DataType::QSYMM16: + return NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::validate(input, bias, output, info.gemmlowp_min_bound, info.gemmlowp_max_bound); + default: + return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported output data type."); + } } - } - else - { - switch(output->data_type()) + case GEMMLowpOutputStageType::QUANTIZE_DOWN: { - case DataType::QASYMM8: - return NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, info.gemmlowp_min_bound, info.gemmlowp_max_bound); - case DataType::QASYMM8_SIGNED: - return NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::validate(input, bias, output, info.gemmlowp_min_bound, info.gemmlowp_max_bound); - case DataType::QSYMM16: - return NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::validate(input, bias, output, info.gemmlowp_min_bound, info.gemmlowp_max_bound); - default: - return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported output data type."); + switch(output->data_type()) + { + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + return NEGEMMLowpQuantizeDownInt32ScaleKernel::validate(input, bias, output, &info); + default: + return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported output data type."); + } } + default: + return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported GEMMLowpOutputStage type."); } } } // namespace arm_compute diff --git 
a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp index 8aa81d0962..41a441c3d2 100644 --- a/tests/validation/CL/GEMMLowp.cpp +++ b/tests/validation/CL/GEMMLowp.cpp @@ -147,6 +147,65 @@ TEST_SUITE_END() // MatrixMultiplyCore TEST_SUITE(OutputStage) +TEST_SUITE(QuantizeDownInt32Scale) + +TEST_SUITE(QASYMM8) + +const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2, + 3) + * framework::dataset::make("min", 0) * framework::dataset::make("max", 255) * framework::dataset::make("addBias", { false, true }); + +const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, + 2) + * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 173) * framework::dataset::make("addBias", { false, true }); + +using CLGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage>; + +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +TEST_SUITE(BoundedReLu) +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +TEST_SUITE_END() // BoundedReLu +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) + +const auto quantize_down_int32_to_int8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2, + 3) + * framework::dataset::make("min", -128) * framework::dataset::make("max", 127) * framework::dataset::make("addBias", { false, true }); + +const auto quantize_down_int32_to_int8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, + 2) + * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", -100, -98) * framework::dataset::make("max", 71, 73) * framework::dataset::make("addBias", { false, true }); + +using CLGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage>; + +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +TEST_SUITE(BoundedReLu) +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_relu_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +TEST_SUITE_END() // BoundedReLu +TEST_SUITE_END() // QASYMM8_SIGNED +TEST_SUITE_END() // QuantizeDownInt32Scale + TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint) const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * 
framework::dataset::make("result_shift", 1, 2) diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp index de30bd5451..c3747ddd24 100644 --- a/tests/validation/NEON/GEMMLowp.cpp +++ b/tests/validation/NEON/GEMMLowp.cpp @@ -165,7 +165,9 @@ TEST_SUITE_END() // MatrixMultiplyCore TEST_SUITE(OutputStage) -TEST_SUITE(QuantizeDownInt32ToUint8Scale) +TEST_SUITE(QuantizeDownInt32Scale) + +TEST_SUITE(QASYMM8) const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2, 3) @@ -175,7 +177,7 @@ const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::m 2) * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true }); -using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8Scale>; +using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>; // *INDENT-OFF* // clang-format off @@ -198,85 +200,112 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( framework::dataset::make("Expected", { true, false })), a_info, b_info, output_info, min, max, expected) { + + GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(); + output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN; + output_stage.gemmlowp_min_bound = min; + output_stage.gemmlowp_max_bound = max; + output_stage.output_data_type = DataType::QASYMM8; + // Lock tensors - Status status = NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(&a_info.clone()->set_is_resizable(false), + Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false), &b_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), - min, - max); + output_stage); ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); } // clang-format on // *INDENT-ON* -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases), - shape, result_offset, result_mult_int, result_shift, min, max, add_bias) +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases)) { - TensorShape shape_bias(shape[0]); + // Validate output + validate(Accessor(_target), _reference); +} - // Create tensors - Tensor in = create_tensor<Tensor>(shape, DataType::S32); - Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32); - Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8); +TEST_SUITE(BoundedReLu) +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases)) +{ + // Validate output + validate(Accessor(_target), _reference); +} - ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS); +TEST_SUITE_END() // BoundedReLu - // Create and configure function - NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage; 
- output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max); +TEST_SUITE_END() // QASYMM8 - // Validate valid region input and output - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(in.info()->valid_region(), valid_region); - validate(out.info()->valid_region(), valid_region); +TEST_SUITE(QASYMM8_SIGNED) - // Validate valid region bias - if(add_bias) - { - const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias); - validate(bias.info()->valid_region(), valid_region_bias); - } +const auto quantize_down_int32_to_int8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2, + 3) + * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true }); - // Validate padding - const PaddingSize padding(0); - validate(in.info()->padding(), padding); - validate(out.info()->padding(), padding); +const auto quantize_down_int32_to_int8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, + 2) + * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", -100, -98) * framework::dataset::make("max", 71, 74) * framework::dataset::make("addBias", { false, true }); - if(add_bias) - { - validate(bias.info()->padding(), padding); - } -} +using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases)) +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( + framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16 + TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max + TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type + }), + framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32), + TensorInfo(TensorShape(21U), 1, DataType::S32), + TensorInfo(TensorShape(20U), 1, DataType::S32), + })), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED), + TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED), + TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), + })), + framework::dataset::make("Min",{ -10, + -200, + -113, + })), + framework::dataset::make("Max",{ 105, + 300, + -18, + })), + framework::dataset::make("Expected", { true, false, false })), + a_info, b_info, output_info, min, max, expected) { - // Validate output - validate(Accessor(_target), _reference); + GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(); + output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN; + output_stage.gemmlowp_min_bound = min; + output_stage.gemmlowp_max_bound = max; + output_stage.output_data_type = DataType::QASYMM8_SIGNED; + + // Lock tensors + Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false), + &b_info.clone()->set_is_resizable(false), + &output_info.clone()->set_is_resizable(false), + output_stage); + ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); } +// clang-format 
on +// *INDENT-ON* -FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases)) +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_cases)) { // Validate output validate(Accessor(_target), _reference); } TEST_SUITE(BoundedReLu) -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases)) +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_relu_cases)) { // Validate output validate(Accessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_relu_cases)) -{ - // Validate output - validate(Accessor(_target), _reference); -} TEST_SUITE_END() // BoundedReLu -TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // QuantizeDownInt32Scale TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint) diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h index be9ce96dcb..e3dc7381fc 100644 --- a/tests/validation/fixtures/GEMMLowpFixture.h +++ b/tests/validation/fixtures/GEMMLowpFixture.h @@ -301,8 +301,16 @@ protected: TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1); // Create and configure function - FunctionType output_stage; - output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_offset, result_mult_int, result_shift, min, max); + FunctionType output_stage; + GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo(); + output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN; + output_stage_info.gemmlowp_offset = result_offset; + output_stage_info.gemmlowp_multiplier = result_mult_int; + output_stage_info.gemmlowp_shift = result_shift; + output_stage_info.gemmlowp_min_bound = min; + output_stage_info.gemmlowp_max_bound = max; + output_stage_info.output_data_type = DataType::QASYMM8; + output_stage.configure(&a, add_bias ? 
&b : nullptr, &c, output_stage_info); ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -367,6 +375,108 @@ protected: }; template <typename TensorType, typename AccessorType, typename FunctionType> +class GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture : public framework::Fixture +{ +public: + template <typename...> + void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias) + { + _target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias); + _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + std::uniform_int_distribution<> distribution(-6000, 6000); + library->fill(tensor, distribution, i); + } + + TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias) + { + TensorShape shape_bias(shape[0]); + + // Create tensors + TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1); + TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1); + TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1); + + // Create and configure function + FunctionType output_stage; + GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo(); + output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN; + output_stage_info.gemmlowp_offset = result_offset; + output_stage_info.gemmlowp_multiplier = result_mult_int; + output_stage_info.gemmlowp_shift = result_shift; + output_stage_info.gemmlowp_min_bound = min; + output_stage_info.gemmlowp_max_bound = max; + output_stage_info.output_data_type = DataType::QASYMM8_SIGNED; + output_stage.configure(&a, add_bias ? 
&b : nullptr, &c, output_stage_info); + + ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Allocate tensors + a.allocator()->allocate(); + c.allocator()->allocate(); + + ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Fill tensor + fill(AccessorType(a), 0); + + if(add_bias) + { + ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Allocate bias tensor + b.allocator()->allocate(); + + ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Fill tensor + fill(AccessorType(b), 1); + } + + // Compute GEMM function + output_stage.run(); + return c; + } + + SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias) + { + // Create reference + TensorShape shape_bias(shape[0]); + + SimpleTensor<int32_t> a{ shape, DataType::S32, 1 }; + SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 }; + + // Fill reference + fill(a, 0); + + const std::vector<int32_t> result_mult_int_vec = { result_mult_int }; + const std::vector<int32_t> result_shift_vec = { result_shift }; + + if(add_bias) + { + // Fill bias + fill(b, 1); + + return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max); + } + else + { + return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max); + } + } + + TensorType _target{}; + SimpleTensor<int8_t> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType> class GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture : public framework::Fixture { public: |
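
Appendix for readers skimming the kernel changes: stripped of the NEON intrinsics and the templated 8-bit narrowing, the per-element arithmetic of NEGEMMLowpQuantizeDownInt32ScaleKernel is the computation the left-over loops in the diff above perform. A scalar model, with T standing for uint8_t (QASYMM8) or int8_t (QASYMM8_SIGNED) and bias taken as zero when no bias tensor is supplied:

    #include <algorithm>
    #include <cstdint>

    // Scalar model of the kernel's quantize-down: add offset (and bias), multiply
    // by the integer multiplier, arithmetic-shift right, clamp, narrow to 8 bits.
    template <typename T>
    T quantize_down_scale(int32_t acc, int32_t bias, int32_t offset,
                          int32_t mult, int32_t shift,
                          int32_t clamp_min, int32_t clamp_max)
    {
        int32_t v = ((acc + bias + offset) * mult) >> shift;
        v         = std::max(clamp_min, std::min(clamp_max, v));
        return static_cast<T>(v);
    }

When the caller does not request a bounded ReLU, the kernel substitutes std::numeric_limits<T>::lowest() and std::numeric_limits<T>::max() for the clamp bounds, which is why _is_bounded_relu is computed from whether the supplied bounds differ from the full range of the output data type.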