author    Luca Foschiani <luca.foschiani@arm.com>    2020-02-26 14:30:14 +0000
committer Luca Foschiani <luca.foschiani@arm.com>    2020-03-23 17:16:22 +0000
commit    689c968239180eda4263e34c3d450093d4a0450d (patch)
tree      9ecc01efac6f59f05c862bf32d6e1ee3ce5a69ed /src/core/CL
parent    3bb75d60ced0cefa503e90f5d0d8cfe3db3f8637 (diff)
download  ComputeLibrary-689c968239180eda4263e34c3d450093d4a0450d.tar.gz
COMPMID-2967 Add support for QASYMM8_SIGNED in CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
Signed-off-by: Luca Foschiani <luca.foschiani@arm.com>
Change-Id: I4f7918630ea95fc28597b3d7b189f3d8fd35aef8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2890
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/CL')
-rw-r--r--  src/core/CL/cl_kernels/gemmlowp.cl | 18
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp (renamed from src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp) | 85
2 files changed, 52 insertions, 51 deletions
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 8e7db9326f..3fba781ede 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -2042,9 +2042,9 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
#endif // defined(K_OFFSET)
#if defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
-/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
+/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
*
- * This kernel takes a final int32 accumulator value and processes it to obtain the final QASYMM8 value.
+ * This kernel takes a final int32 accumulator value and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
* The following computations will be performed by the kernel:
*
* -# Add offset terms to final result
@@ -2052,11 +2052,14 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
* -# Add bias to final result (if -DADD_BIAS is passed at compile time)
* -# Shift the int32 accumulator by result_shift
* -# Clamp the value between the specified min and max bounds (if -DMIN_BOUND and/or -DMAX_BOUND are passed at compile time)
- * -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8.
+ * -# Clamp the resulting int32 values:
+ * -# - to the [0..255] range and cast to QASYMM8.
+ * -# - to the [-128..127] range and cast to QASYMM8_SIGNED.
*
* @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -RESULT_MULT_INT and -DRESULT_SHIFT
*
* @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
+ * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
* @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
* These values can be used to implement "rectified linear unit" activation functions
*
@@ -2072,7 +2075,7 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
* @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
* @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
- * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8
+ * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8/QASYMM8_SIGNED
* @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
@@ -2118,13 +2121,14 @@ __kernel void gemmlowp_output_stage_quantize_down(TENSOR3D_DECLARATION(src),
input_values >>= RESULT_SHIFT;
#endif // RESULT_SHIFT < 0
- uchar4 res = convert_uchar4_sat(input_values);
+ VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
+ res = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));
#if defined(MIN_BOUND)
- res = max(res, (uchar4)MIN_BOUND);
+ res = max(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
- res = min(res, (uchar4)MAX_BOUND);
+ res = min(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
#endif // defined(MAX_BOUND)
// Store the result
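For reference, the steps listed in the kernel documentation above amount to the per-element computation sketched below. This is only a scalar C++ sketch of what the OpenCL kernel performs on int32 4-element vectors; the function name and the defaulted bound parameters are illustrative, not library code, and the negative-RESULT_SHIFT branch is omitted.

// Scalar sketch (not library code) of gemmlowp_output_stage_quantize_down,
// following the step order given in the kernel documentation above.
// OutT is uint8_t for QASYMM8 or int8_t for QASYMM8_SIGNED.
#include <algorithm>
#include <cstdint>
#include <limits>

template <typename OutT>
OutT quantize_down_scale(int32_t acc, int32_t result_offset, int32_t result_mult_int,
                         int32_t result_shift, int32_t bias = 0,
                         int32_t min_bound = std::numeric_limits<OutT>::min(),
                         int32_t max_bound = std::numeric_limits<OutT>::max())
{
    int32_t v = acc + result_offset;   // -DRESULT_OFFSET
    v *= result_mult_int;              // -DRESULT_MULT_INT
    v += bias;                         // optional -DADD_BIAS
    v >>= result_shift;                // -DRESULT_SHIFT (RESULT_SHIFT >= 0 case)
    v = std::max(v, min_bound);        // optional -DMIN_BOUND
    v = std::min(v, max_bound);        // optional -DMAX_BOUND
    // Saturating cast, as CONVERT_SAT does: [0, 255] or [-128, 127]
    const int32_t lo = std::numeric_limits<OutT>::min();
    const int32_t hi = std::numeric_limits<OutT>::max();
    return static_cast<OutT>(std::min(std::max(v, lo), hi));
}

Instantiated with int8_t this saturates to [-128, 127] (QASYMM8_SIGNED); with uint8_t it reproduces the original [0, 255] QASYMM8 behaviour.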
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
index d3211f6ee8..002af6b471 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,27 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h"
#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "support/StringSupport.h"
-using namespace arm_compute;
-
namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON(min > max);
+ ARM_COMPUTE_RETURN_ERROR_ON((output_stage->output_data_type != DataType::QASYMM8) && (output_stage->output_data_type != DataType::QASYMM8_SIGNED));
+ ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)));
+ ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
+ || output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound);
// Check biases if exist
if(bias != nullptr)
@@ -53,15 +56,18 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
if(output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() != output_stage->output_data_type, "Mismatching output data type");
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
}
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output, DataType output_data_type)
{
+ // Output auto inizialitation if not yet initialized
+ auto_init_if_empty(*output, input->clone()->set_data_type(output_data_type));
+
constexpr unsigned int num_elems_processed_per_iteration = 4;
// Configure kernel window
@@ -72,13 +78,9 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
bool window_changed = update_window_and_padding(win,
input_access);
- if(output->total_size() != 0)
- {
- AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, output_result_access);
-
- output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- }
+ AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = window_changed || update_window_and_padding(win, output_result_access);
+ output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
if(bias != nullptr)
{
@@ -89,65 +91,59 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
-} // namespace
-
-class Coordinates;
-} // namespace arm_compute
+} //namespace
-CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel()
- : _input(nullptr), _bias(nullptr), _output(nullptr)
+CLGEMMLowpQuantizeDownInt32ScaleKernel::CLGEMMLowpQuantizeDownInt32ScaleKernel()
+ : _input(nullptr), _bias(nullptr), _output(nullptr), _output_stage(nullptr)
{
}
-Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+Status CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
- (bias != nullptr) ? bias->clone().get() : nullptr,
- output->clone().get())
- .first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, output_stage));
return Status{};
}
-void CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_offset, int result_mult_int, int result_shift, int min,
- int max)
+void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage)
{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(DataType::QASYMM8));
-
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
(bias != nullptr) ? bias->info() : nullptr,
output->info(),
- min,
- max));
+ output_stage));
- _input = input;
- _bias = bias;
- _output = output;
+ _input = input;
+ _bias = bias;
+ _output = output;
+ _output_stage = output_stage;
// Set the arguments to pass at compile time
+ auto min = output_stage->gemmlowp_min_bound;
+ auto max = output_stage->gemmlowp_max_bound;
CLBuildOptions build_opts;
- build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(result_offset));
- build_opts.add_option("-DRESULT_MULT_INT=" + support::cpp11::to_string(result_mult_int));
- build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(result_shift));
- build_opts.add_option_if((min > 0), "-DMIN_BOUND=" + support::cpp11::to_string(min));
- build_opts.add_option_if((max < 255), "-DMAX_BOUND=" + support::cpp11::to_string(max));
+ build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage->gemmlowp_offset));
+ build_opts.add_option("-DRESULT_MULT_INT=" + support::cpp11::to_string(output_stage->gemmlowp_multiplier));
+ build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage->gemmlowp_shift));
+ build_opts.add_option_if((min > std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))) && (min != max),
+ "-DMIN_BOUND=" + support::cpp11::to_string(min));
+ build_opts.add_option_if((max < std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))) && (min != max),
+ "-DMAX_BOUND=" + support::cpp11::to_string(max));
+ build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemmlowp_output_stage_quantize_down", build_opts.options()));
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info());
+ auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), output_stage->output_data_type);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
}
-void CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window, cl::CommandQueue &queue)
+void CLGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
@@ -173,3 +169,4 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window, cl
}
while(collapsed.slide_window_slice_3D(slice));
}
+}
\ No newline at end of file
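To show how the reworked interface fits together, here is a minimal, hypothetical host-side sketch that configures the renamed kernel for a QASYMM8_SIGNED output. It only relies on the configure()/validate() signatures and the GEMMLowpOutputStageInfo fields visible in the diff above; the helper function name, the placeholder stage values and the use of CLScheduler for enqueueing are assumptions, not part of this commit.

// Hypothetical usage sketch of CLGEMMLowpQuantizeDownInt32ScaleKernel
// after this change; the stage values are placeholders.
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

using namespace arm_compute;

// gemm_result_s32: S32 accumulators; dst: assumed already initialised as
// QASYMM8_SIGNED with the same shape (or left empty for auto-initialisation).
void run_quantize_down_s8(const ICLTensor *gemm_result_s32, const ICLTensor *bias, ICLTensor *dst)
{
    GEMMLowpOutputStageInfo info{};
    info.output_data_type    = DataType::QASYMM8_SIGNED; // drives -DOUTPUT_DATA_TYPE
    info.gemmlowp_offset     = -2;                       // -DRESULT_OFFSET
    info.gemmlowp_multiplier = 3;                        // -DRESULT_MULT_INT
    info.gemmlowp_shift      = 1;                        // -DRESULT_SHIFT
    info.gemmlowp_min_bound  = -100;                     // optional -DMIN_BOUND
    info.gemmlowp_max_bound  = 100;                      // optional -DMAX_BOUND

    CLGEMMLowpQuantizeDownInt32ScaleKernel kernel;
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(
        gemm_result_s32->info(), (bias != nullptr) ? bias->info() : nullptr, dst->info(), &info));
    kernel.configure(gemm_result_s32, bias, dst, &info);

    CLScheduler::get().enqueue(kernel); // assumed scheduling path
}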