-rw-r--r--  arm_compute/core/CL/CLKernels.h                                                       |   1
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h    | 100
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h                             |  18
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h                              |  42
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                                                       |   1
-rw-r--r--  src/core/CL/cl_kernels/gemmlowp.cl                                                    |  91
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp          | 207
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp                                   |  12
-rw-r--r--  src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp                                    |  16
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp                                       |   6
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp                                              |   2
-rw-r--r--  tests/validation/CL/DilatedConvolutionLayer.cpp                                       |   2
12 files changed, 474 insertions(+), 24 deletions(-)
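
In short: the patch adds a GEMMLowp output stage that requantizes the int32 GEMM accumulators to QASYMM8 with a single floating-point multiplier, where the fixed-point stage it replaces in CLGEMMConvolutionLayer needed a quantized multiplier/shift pair. A minimal host-side C++ sketch of the per-element math, mirroring the OpenCL kernel added below (the helper name and signature are illustrative, not part of the patch):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // One element: acc is the int32 GEMM accumulator, bias the optional bias,
    // multiplier = in_scale * w_scale / out_scale, offset the output zero-point,
    // [min_bound, max_bound] the optional clamp (e.g. for a fused ReLU).
    uint8_t quantize_down_by_float(int32_t acc, int32_t bias, float multiplier, int32_t offset,
                                   int32_t min_bound = 0, int32_t max_bound = 255)
    {
        acc += bias; // ADD_BIAS path in the kernel
        const float scaled = std::round(static_cast<float>(acc) * multiplier + static_cast<float>(offset));
        // Saturating cast to the uchar range, as convert_uchar16_sat does in the kernel
        int32_t res = static_cast<int32_t>(std::min(255.0f, std::max(0.0f, scaled)));
        // Optional MIN_BOUND / MAX_BOUND clamp
        res = std::min(max_bound, std::max(min_bound, res));
        return static_cast<uint8_t>(res);
    }
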
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index 298cf5241f..1e456fa17e 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -71,6 +71,7 @@
 #include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h
new file mode 100644
index 0000000000..5a5d3938b7
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFLOATKERNEL_H__
+#define __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFLOATKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ICLTensor;
+
+/** OpenCL kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value.
+ * The following computations will be performed by the kernel:
+ *
+ *  -# Add bias to the accumulator if the bias tensor is not a nullptr
+ *  -# Multiply each entry by the floating-point @p multiplier and add the quantization @p offset
+ *  -# Round, saturate the result to the [0..255] range and cast to QASYMM8
+ *  -# Clamp the value between the specified min and max bounds
+ */
+class CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel : public ICLKernel
+{
+public:
+    /** Constructor */
+    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel(const CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &operator=(const CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &operator=(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &&) = default;
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  input           Input tensor. Data type supported: S32
+     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[out] output          Output tensor. Data type supported: QASYMM8
+     * @param[in]  multiplier      Float multiplier to be multiplied to each element of the input matrix
+     * @param[in]  offset          Offset to be applied to result before converting it back to QASYMM8
+     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
+     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in]  output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
+     */
+    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, float multiplier, int offset,
+                   int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel
+     *
+     * @param[in] input           Input tensor. Data type supported: S32
+     * @param[in] bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+     *                            Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output          Output tensor. Data type supported: QASYMM8
+     * @param[in] min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in] max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
+     *                            Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in] output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
+                           int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    const ICLTensor *_input;
+    const ICLTensor *_bias;
+    ICLTensor       *_output;
+    bool             _reinterpret_as_3d;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFLOATKERNEL_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 958e70fca4..48b880174d 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -178,15 +178,15 @@ private:
     static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth = 1, bool skip_im2col = false);
 
 private:
-    CLMemoryGroup                                       _memory_group;
-    CLConvolutionLayerReshapeWeights                    _reshape_weights;
-    CLIm2ColKernel                                      _im2col_kernel;
-    CLGEMM                                              _mm_gemm;
-    CLGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
-    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
-    CLCol2ImKernel                                      _col2im_kernel;
-    CLActivationLayer                                   _activationlayer_function;
-    CLArithmeticAdditionKernel                          _add_bias_kernel;
+    CLMemoryGroup                                  _memory_group;
+    CLConvolutionLayerReshapeWeights               _reshape_weights;
+    CLIm2ColKernel                                 _im2col_kernel;
+    CLGEMM                                         _mm_gemm;
+    CLGEMMLowpMatrixMultiplyCore                   _mm_gemmlowp;
+    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat _gemmlowp_output_stage;
+    CLCol2ImKernel                                 _col2im_kernel;
+    CLActivationLayer                              _activationlayer_function;
+    CLArithmeticAdditionKernel                     _add_bias_kernel;
 
     const ICLTensor *_original_weights;
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
index dca00f027e..51fcbe9392 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
@@ -150,5 +150,47 @@ public:
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0, unsigned int output_3d_depth = 1);
 };
+
+/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat on OpenCL.
+ *
+ * This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel
+ *
+ * @note The function also accepts two optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
+ *       on the requantized result
+*/
+class CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat : public ICLSimpleFunction
+{
+public:
+    /** Initialise the kernel's inputs, output
+     *
+     * @param[in]  input           Input tensor. Data type supported: S32
+     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[out] output          Output tensor. Data type supported: QASYMM8
+     * @param[in]  multiplier      Float multiplier to be multiplied to each element of the input matrix
+     * @param[in]  offset          Offset to be applied to result before converting it back to QASYMM8
+     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
+     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in]  output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
+     */
+    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, float multiplier, int offset, int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat
+     *
+     * @param[in] input           Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+     * @param[in] bias            Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+     *                            Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output          Output tensor. Data type supported: QASYMM8
+     * @param[in] min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in] max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
+     *                            Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in] output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+};
 } // namespace arm_compute
 #endif /*__ARM_COMPUTE_CLGEMMLOWPOUTPUTSTAGE_H__ */
\ No newline at end of file
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 12a7c38dfd..880963de7b 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -269,6 +269,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "gemmlowp_offset_contribution", "gemmlowp.cl" },
     { "gemmlowp_output_stage_quantize_down", "gemmlowp.cl" },
     { "gemmlowp_output_stage_quantize_down_fixedpoint", "gemmlowp.cl" },
+    { "gemmlowp_output_stage_quantize_down_float", "gemmlowp.cl" },
     { "harris_score_3x3", "harris_corners.cl" },
     { "harris_score_5x5", "harris_corners.cl" },
     { "harris_score_7x7", "harris_corners.cl" },
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 0fc3868341..80b5d00cf2 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -2276,3 +2276,94 @@ __kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATIO
     vstore16(res, 0, dst.ptr);
 }
 #endif // defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
+
+#if defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
+/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value.
+ * The following computations will be performed by the kernel:
+ *
+ *  -# Add bias to the accumulator if the bias tensor is not a nullptr
+ *  -# Multiply each entry by the floating-point REAL_MULTIPLIER and add the OUTPUT_OFFSET
+ *  -# Round, saturate the result to the [0..255] range and cast to QASYMM8
+ *  -# Clamp the value between the specified min and max bounds
+ *
+ * @attention The quantization offset and the scalar scale factor must be passed at compile time using -DOUTPUT_OFFSET and -DREAL_MULTIPLIER
+ *
+ * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
+ * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
+ *       These values can be used to implement "rectified linear unit" activation functions
+ *
+ * @param[in]  src_ptr                              Pointer to the source tensor. Supported data type: S32
+ * @param[in]  src_stride_x                         Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                           src_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  src_stride_y                         Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                           src_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  src_stride_z                         Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                           src_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  src_offset_first_element_in_bytes    The offset of the first element in the source tensor
+ * @param[in]  biases_ptr                           Pointer to the biases tensor. Supported data type: same as @p src_ptr
+ * @param[in]  biases_stride_x                      Stride of the biases tensor in X dimension (in bytes)
+ * @param[in]  biases_step_x                        biases_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
+ * @param[out] dst_ptr                              Pointer to the destination tensor. Supported data type: QASYMM8
+ * @param[in]  dst_stride_x                         Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                           dst_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  dst_stride_y                         Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                           dst_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  dst_stride_z                         Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                           dst_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  dst_stride_w                         Stride of the destination tensor in W dimension (in bytes)
+ * @param[in]  dst_step_w                           dst_stride_w * number of elements along W processed per workitem (in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes    The offset of the first element in the destination tensor
+ */
+__kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src),
+#if defined(ADD_BIAS)
+                                                        VECTOR_DECLARATION(biases),
+#endif // defined(ADD_BIAS)
+#if defined(DST_HEIGHT)
+                                                        TENSOR4D_DECLARATION(dst))
+#else  // defined(DST_HEIGHT)
+                                                        TENSOR3D_DECLARATION(dst))
+#endif // defined(DST_HEIGHT)
+{
+    // Compute source and destination addresses
+    Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+#if defined(DST_HEIGHT)
+    Tensor4D dst = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(dst, 1);
+    dst.ptr += get_global_id(0) * dst_step_x + (get_global_id(1) % DST_HEIGHT) * dst_step_y + (get_global_id(1) / DST_HEIGHT) * dst_step_z + get_global_id(2) * dst_step_w;
+#else  // defined(DST_HEIGHT)
+    Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+#endif // defined(DST_HEIGHT)
+
+#if defined(ADD_BIAS)
+    Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
+#endif // defined(ADD_BIAS)
+
+    int16 input_values = vload16(0, (__global int *)src.ptr);
+
+#if defined(ADD_BIAS)
+    // Add bias
+    const int16 biases_values = vload16(0, (__global int *)biases.ptr);
+    input_values += (int16)biases_values;
+#endif // defined(ADD_BIAS)
+
+    // Convert to float, scale and add the output offset
+    float16 input_values_f = convert_float16(input_values);
+    input_values_f         = round(input_values_f * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET);
+
+    uchar16 res = convert_uchar16_sat(input_values_f);
+
+#if defined(MIN_BOUND)
+    res = max(res, (uchar16)MIN_BOUND);
+#endif // defined(MIN_BOUND)
+#if defined(MAX_BOUND)
+    res = min(res, (uchar16)MAX_BOUND);
+#endif // defined(MAX_BOUND)
+
+    // Store the result
+    vstore16(res, 0, dst.ptr);
+}
+#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
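
A note on the DST_HEIGHT branch above: when output_3d_depth > 1 the destination is addressed as a 4D tensor, and the flattened GEMM row index get_global_id(1) is split into a row within one output plane and a plane index. The same address computation on the host, as an illustrative C++ sketch (the helper is not part of the patch):

    #include <cstddef>

    // Mirrors the dst.ptr computation in the kernel's DST_HEIGHT branch for a
    // given work-item id (x, y, w); steps are the per-dimension byte strides.
    size_t dst_offset(size_t x, size_t y, size_t w, size_t dst_height,
                      size_t step_x, size_t step_y, size_t step_z, size_t step_w)
    {
        const size_t row   = y % dst_height; // row inside one output plane
        const size_t plane = y / dst_height; // which plane of the 3D output
        return x * step_x + row * step_y + plane * step_z + w * step_w;
    }
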
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp
new file mode 100644
index 0000000000..f0096bd3ad
--- /dev/null
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
+                          int min, int max, unsigned int output_3d_depth)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
+    ARM_COMPUTE_RETURN_ERROR_ON(max > 255);
+    ARM_COMPUTE_RETURN_ERROR_ON(min < 0 || min > max);
+
+    // Check biases if exist
+    if(bias != nullptr)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
+        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
+    }
+
+    if(output->total_size() != 0)
+    {
+        const TensorShape output_shape       = arm_compute::misc::shape_calculator::compute_output_stage_shape(*input, output_3d_depth, true);
+        const TensorInfo  tensor_info_output = output->clone()->set_tensor_shape(output_shape);
+        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
+    }
+
+    return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
+{
+    constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+
+    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+
+    bool window_changed = update_window_and_padding(win, input_access);
+
+    if(output->total_size() != 0)
+    {
+        Window                 win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
+        AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
+        window_changed = window_changed || update_window_and_padding(win_out, output_result_access);
+
+        output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+    }
+
+    if(bias != nullptr)
+    {
+        AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
+        window_changed = window_changed || update_window_and_padding(win, bias_access);
+    }
+
+    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+    return std::make_pair(err, win);
+}
+} // namespace
+
+class Coordinates;
+} // namespace arm_compute
+
+CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel()
+    : _input(nullptr), _bias(nullptr), _output(nullptr), _reinterpret_as_3d(false)
+{
+}
+
+Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
+                                                                      int min, int max, unsigned int output_3d_depth)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max, output_3d_depth));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
+                                                              (bias != nullptr) ? bias->clone().get() : nullptr,
+                                                              output->clone().get())
+                                .first);
+
+    return Status{};
+}
+
+void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
+                                                                     float multiplier, int offset,
+                                                                     int min, int max, unsigned int output_3d_depth)
+{
+    // Perform validate step
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    // Output auto initialization if not yet initialized
+    const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_output_stage_shape(*input->info(), output_3d_depth, true);
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(DataType::QASYMM8).set_tensor_shape(output_shape));
+
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(),
+                                                  min, max, output_3d_depth));
+
+    _input             = input;
+    _bias              = bias;
+    _output            = output;
+    _reinterpret_as_3d = output_3d_depth > 1;
+
+    // Set the arguments to pass at compile time
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DREAL_MULTIPLIER=" + float_to_string_with_full_precision(multiplier));
+    build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(offset));
+    build_opts.add_option_if((min != 0) && (min != max), "-DMIN_BOUND=" + support::cpp11::to_string(min));
+    build_opts.add_option_if((max != 255) && (min != max), "-DMAX_BOUND=" + support::cpp11::to_string(max));
+    build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
+    build_opts.add_option_if(_reinterpret_as_3d, "-DDST_HEIGHT=" + support::cpp11::to_string(input->info()->tensor_shape().y() / output_3d_depth));
+
+    // Create kernel
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_output_stage_quantize_down_float", build_opts.options()));
+
+    // Configure kernel window
+    auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info());
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+    ICLKernel::configure_internal(win_config.second);
+}
+
+void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+    // Create input window
+    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+    Window slice     = collapsed.first_slice_window_3D();
+
+    // Setup bias slice
+    unsigned int idx1 = num_arguments_per_3D_tensor();
+    if(_bias != nullptr)
+    {
+        Window biases_slice(slice);
+        biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
+        biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
+        add_1D_tensor_argument(idx1, _bias, biases_slice);
+    }
+
+    if(_reinterpret_as_3d)
+    {
+        // Create output window
+        Window window_out;
+        window_out.use_tensor_dimensions(_output->info()->tensor_shape());
+        Window collapsed_out = window_out.collapse_if_possible(window_out, 3);
+        Window slice_out     = collapsed_out.first_slice_window_4D();
+
+        do
+        {
+            unsigned int idx = 0;
+            add_3D_tensor_argument(idx, _input, slice);
+            add_4D_tensor_argument(idx1, _output, slice_out);
+            enqueue(queue, *this, slice);
+        }
+        while(collapsed.slide_window_slice_3D(slice) && collapsed_out.slide_window_slice_4D(slice_out));
+    }
+    else
+    {
+        do
+        {
+            unsigned int idx = 0;
+            add_3D_tensor_argument(idx, _input, slice);
+            add_3D_tensor_argument(idx1, _output, slice);
+            enqueue(queue, *this, slice);
+        }
+        while(collapsed.slide_window_slice_3D(slice));
+    }
+}
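
The kernel follows the library's usual static validate() / configure() pattern; a hedged sketch of driving it directly (tensor initialization elided, the multiplier and offset values are made up for illustration):

    #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    // Assumes acc (S32), bias (S32) and dst (QASYMM8) are already-initialized CLTensors.
    void run_quantize_down(CLTensor &acc, CLTensor &bias, CLTensor &dst)
    {
        const Status status = CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::validate(acc.info(), bias.info(), dst.info());
        if(status.error_code() == ErrorCode::OK)
        {
            CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel kernel;
            kernel.configure(&acc, &bias, &dst, 0.0038f /* multiplier */, 128 /* offset */);
            CLScheduler::get().enqueue(kernel); // runs over the window set by configure()
        }
    }
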
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index f41a12ae48..61180fd5d3 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -284,17 +284,14 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     {
         const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();
 
-        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
-        int   output_multiplier, output_shift;
-        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
         if(!_skip_col2im)
         {
             _memory_group.manage(&_tmp_output);
             gemm_output_staged_to_use = &_tmp_output;
         }
 
-        _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, output_multiplier, output_shift, output_quant_info.offset);
+        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
+        _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, multiplier, output_quant_info.offset);
     }
 
     if(!_skip_col2im)
@@ -448,17 +445,12 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
 
     if(is_quantized)
     {
-        float multiplier = input->quantization_info().scale * weights_to_use->quantization_info().scale / output->quantization_info().scale;
-        int   output_multiplier, output_shift;
-        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
         if(!skip_col2im)
         {
             tmp_info = TensorInfo(gemm_output_to_use->tensor_shape(), 1, DataType::QASYMM8);
             tmp_info.set_quantization_info(output->quantization_info());
             gemm_output_staged_to_use = &tmp_info;
         }
-
         // Validate output stage for quantized case
         CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(gemm_output_to_use, biases, gemm_output_staged_to_use);
     }
diff --git a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
index b18d23fac9..f5dc655776 100644
--- a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
@@ -25,6 +25,7 @@
 
 #include "arm_compute/core/CL/ICLTensor.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
 #include "support/ToolchainSupport.h"
 
@@ -56,4 +57,19 @@ Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITens
 {
     return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max, output_3d_depth);
 }
+
+void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
+                                                               float multiplier, int offset,
+                                                               int min, int max, unsigned int output_3d_depth)
+{
+    auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel>();
+    k->configure(input, bias, output, multiplier, offset, min, max, output_3d_depth);
+    _kernel = std::move(k);
+}
+
+Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
+                                                                int min, int max, unsigned int output_3d_depth)
+{
+    return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::validate(input, bias, output, min, max, output_3d_depth);
+}
 } // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index cbf3c7092d..5aef357b4d 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -48,9 +48,9 @@ namespace validation
 {
 namespace
 {
-RelativeTolerance<float>           rel_tolerance_f32(0.05f);    /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-constexpr AbsoluteTolerance<float> tolerance_f16(0.01f);        /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+RelativeTolerance<float>           rel_tolerance_f32(0.05f);   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.0001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<float> tolerance_f16(0.01f);       /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
 
 const auto act_infos = framework::dataset::make("ActivationInfo",
 {
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 5c96cd4c59..0274bed977 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -48,7 +48,7 @@ namespace
 constexpr AbsoluteTolerance<float>  absolute_tolerance_float(0.0001f);    /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 RelativeTolerance<float>            tolerance_f32(0.05f);                 /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float>  tolerance_qasymm8(0.0);               /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1);                 /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
 constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number */
 
 /** CNN data types */
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index 9ebde38bcf..2ddc031e14 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -45,7 +45,7 @@ namespace
 {
 RelativeTolerance<float>            rel_tolerance_f32(0.05f);                  /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float>  abs_tolerance_qasymm8(0.0);                /**< Relative tolerance value for comparing reference's output against implementation's output for quantized data types */
+constexpr AbsoluteTolerance<float>  abs_tolerance_qasymm8(1);                  /**< Absolute tolerance value for comparing reference's output against implementation's output for quantized data types */
 constexpr float                     abs_tolerance_f32 = 0.001f;                /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 constexpr float                     abs_tolerance_f16 = 0.3f;                  /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */
 constexpr float                     tolerance_num_f16 = 0.07f;                 /**< Tolerance number for FP16 */
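
On the test changes: the QASYMM8 tolerances move from 0 to 1 because the float output stage can round a result to the neighbouring integer compared with the fixed-point stage. A self-contained comparison of the two paths (the fixed-point decomposition below is re-derived for illustration and is not the library's implementation; the multiplier and accumulator values are made up):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const float   multiplier = 0.0008f; // assumed in_scale * w_scale / out_scale, < 0.5
        const int32_t acc        = 40625;   // assumed int32 GEMM accumulator

        // New float path: one multiply, one round.
        const int32_t via_float = static_cast<int32_t>(std::round(acc * multiplier));

        // Old fixed-point path, sketched: approximate the multiplier as
        // q * 2^-(31+s) with q a 31-bit integer, then requantize with integer ops.
        int   s    = 0;
        float norm = multiplier;
        while(norm < 0.5f)
        {
            norm *= 2.0f;
            ++s;
        }
        const int64_t q         = std::llround(norm * (1ll << 31));
        const int64_t high      = (static_cast<int64_t>(acc) * q + (1ll << 30)) >> 31; // rounding high mul
        const int32_t via_fixed = static_cast<int32_t>(s > 0 ? (high + (1ll << (s - 1))) >> s : high);

        // The two results agree to within 1, which is the new test tolerance.
        std::printf("float: %d, fixed-point: %d\n", via_float, via_fixed);
        return 0;
    }
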