author    Gian Marco Iodice <gianmarco.iodice@arm.com>    2019-06-13 15:58:32 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>     2019-06-25 13:31:23 +0000
commit    bc415af5ee9517fd113e9ea0f01fdc84f9693dc4 (patch)
tree      a8ae892b71aba2e4384d7a738025cae20e8dcfbf
parent    e99196cf25a24fabd66ef33764d3c532f7cddf49 (diff)
COMPMID-2406: Create a new GEMMLowpQuantizeDownInt32ToInt16ScaleKernel for NEON
Change-Id: I3f3e247728fd6dafca066e41835f0ef9442d9b7a
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1379
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
 arm_compute/core/NEON/NEKernels.h                                                         |   1
 arm_compute/core/NEON/NESymm.h                                                            | 106
 arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h | 116
 arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h                                |  62
 src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp       | 237
 src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp                                      |  22
 tests/validation/NEON/GEMMLowp.cpp                                                        | 114
 tests/validation/fixtures/GEMMLowpFixture.h                                               |  92
 tests/validation/reference/GEMMLowp.cpp                                                   |  56
 tests/validation/reference/GEMMLowp.h                                                     |   9
 10 files changed, 807 insertions(+), 8 deletions(-)
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 206ff06f8a..4023d82107 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -78,6 +78,7 @@
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
diff --git a/arm_compute/core/NEON/NESymm.h b/arm_compute/core/NEON/NESymm.h
new file mode 100644
index 0000000000..0479753426
--- /dev/null
+++ b/arm_compute/core/NEON/NESymm.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESYMM_H__
+#define __ARM_COMPUTE_NESYMM_H__
+
+#include "NEAsymm.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+/** Performs the final quantization step on 8 signed 32-bit accumulators, producing 8 signed 16-bit values
+ *
+ * @tparam is_bounded_relu Specifies whether a fused bounded ReLU should be applied
+ *
+ * @param[in] in_s32 Input to be quantized.
+ * @param[in] result_fixedpoint_multiplier Result multiplier parameter
+ * @param[in] result_shift Result shift parameter
+ * @param[in] min_s16 Relu lower bound
+ * @param[in] max_s16 Relu upper bound
+ *
+ * @return Quantized values
+ */
+template <bool is_bounded_relu>
+int16x8_t finalize_quantization_int16(int32x4x2_t &in_s32,
+ int result_fixedpoint_multiplier,
+ int32_t result_shift,
+ int16x8_t min_s16,
+ int16x8_t max_s16)
+{
+    // Fixed-point multiplication: vector saturating rounding doubling multiply high by a scalar
+ in_s32.val[0] = vqrdmulhq_n_s32(in_s32.val[0], result_fixedpoint_multiplier);
+ in_s32.val[1] = vqrdmulhq_n_s32(in_s32.val[1], result_fixedpoint_multiplier);
+
+    // Round to the nearest division by a power-of-two using result_shift
+ in_s32.val[0] = rounding_divide_by_pow2(in_s32.val[0], result_shift);
+ in_s32.val[1] = rounding_divide_by_pow2(in_s32.val[1], result_shift);
+
+ // Convert S32 to S16
+ int16x8_t out_s16 = vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1]));
+
+ if(is_bounded_relu)
+ {
+ out_s16 = vmaxq_s16(out_s16, min_s16);
+ out_s16 = vminq_s16(out_s16, max_s16);
+ }
+
+ return out_s16;
+}
+
+/** Performs the final quantization step on a single signed 32-bit accumulator, producing a signed 16-bit value
+ *
+ * @tparam is_bounded_relu Specifies whether a fused bounded ReLU should be applied
+ *
+ * @param[in] in_value Input to be quantized.
+ * @param[in] result_fixedpoint_multiplier Result multiplier parameter
+ * @param[in] result_shift Result shift parameter
+ * @param[in] min_s16 Relu lower bound
+ * @param[in] max_s16 Relu upper bound
+ *
+ * @return Quantized values
+ */
+template <bool is_bounded_relu>
+inline int16_t finalize_quantization_int16(int32_t in_value, int result_fixedpoint_multiplier,
+ int32_t result_shift, int16_t min_s16, int16_t max_s16)
+{
+ int32x4_t in_s32 = vdupq_n_s32(in_value);
+
+    // Fixed-point multiplication: vector saturating rounding doubling multiply high by a scalar
+ in_value = vgetq_lane_s32(vqrdmulhq_n_s32(in_s32, result_fixedpoint_multiplier), 0);
+
+    // Round to the nearest division by a power-of-two using result_shift
+ in_value = rounding_divide_by_pow2(in_value, result_shift);
+
+ // Bound the result
+ int16_t out_s16 = static_cast<int16_t>(std::max<int32_t>(-32768, std::min<int32_t>(32767, in_value)));
+
+ if(is_bounded_relu)
+ {
+ out_s16 = static_cast<int16_t>(std::max(min_s16, std::min(max_s16, out_s16)));
+ }
+
+ return out_s16;
+}
+} // namespace arm_compute
+#endif // __ARM_COMPUTE_NESYMM_H__
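
For readers not fluent in the intrinsics above: vqrdmulhq_n_s32 is a saturating rounding doubling multiply-high (the nearest int32 to x * multiplier / 2^31), rounding_divide_by_pow2 is a rounding right shift, and vqmovn_s32 saturates the result into the int16 range. A minimal scalar sketch of the same arithmetic, assuming gemmlowp's definitions of the two primitives (the helper names below are illustrative, not part of NESymm.h):

    #include <algorithm>
    #include <cstdint>

    // Nearest int32 to (a * b) / 2^31: the scalar counterpart of vqrdmulhq_n_s32.
    int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
    {
        // Only a == b == INT32_MIN overflows; saturate in that case.
        if(a == INT32_MIN && b == INT32_MIN)
        {
            return INT32_MAX;
        }
        const int64_t ab = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        return static_cast<int32_t>((ab + (1ll << 30)) >> 31);
    }

    // Divide by 2^exponent, rounding to nearest with ties away from zero.
    int32_t rounding_divide_by_pow2_scalar(int32_t x, int exponent)
    {
        const int32_t mask      = (1 << exponent) - 1;
        const int32_t remainder = x & mask;
        const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
        return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
    }

    // Scalar model of finalize_quantization_int16 for one element, ReLU disabled.
    int16_t quantize_down_to_int16(int32_t v, int32_t multiplier, int shift)
    {
        v = saturating_rounding_doubling_high_mul(v, multiplier);
        v = rounding_divide_by_pow2_scalar(v, shift);
        return static_cast<int16_t>(std::max<int32_t>(-32768, std::min<int32_t>(32767, v)));
    }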
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h
new file mode 100644
index 0000000000..806c0e4c2f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOINT16SCALEBYFIXEDPOINTKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOINT16SCALEBYFIXEDPOINTKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QSYMM16 value.
+ * The kernel performs the following computations:
+ *
+ * -# Add bias to the input value if the bias tensor is not a nullptr
+ * -# Compute the fixed point multiplication between each entry of the input and result_fixedpoint_multiplier
+ * -# Round to the nearest division by a power-of-two using result_shift
+ * -# Saturate the resulting int32 value to the [-32768, 32767] range and cast to QSYMM16
+ * -# Clamp the value between the specified min and max bounds (fused bounded ReLU), when enabled
+ *
+ */
+class NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel";
+ }
+ /** Constructor */
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel(const NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel &operator=(const NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel(NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel &operator=(NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel &&) = default;
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] input Input tensor. Data type supported: S32
+     * @param[in]  bias                         Biases tensor. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
+     *                                          Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[out] output                       Output tensor. Data type supported: QSYMM16
+     * @param[in]  result_fixedpoint_multiplier Fixed-point multiplier applied to each element of the input matrix once the bias (if any) has been added
+     * @param[in]  result_shift                 Integer value used for the rounding division by a power-of-two that follows the fixed-point multiplication
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to 0.
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to 0.
+ */
+ void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, int min = 0, int max = 0);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
+ *
+ * @param[in] input Input tensor info. Data type supported: S32
+     * @param[in] bias   Biases tensor info. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
+     *                   Biases are a 1D tensor info with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output Output tensor info. Data type supported: QSYMM16
+     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to 0.
+     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
+     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to 0.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ /** Template function to run the NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool is_bounded_relu>
+ void run(const Window &window);
+
+ /** Common signature for all the specialised NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel functions
+ *
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using QuantizeDownFunctionPtr = void (NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::*)(const Window &window);
+
+ QuantizeDownFunctionPtr _func;
+ const ITensor *_input;
+ const ITensor *_bias;
+ ITensor *_output;
+ int _result_fixedpoint_multiplier;
+ int _result_shift;
+ int _min;
+ int _max;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOINT16SCALEBYFIXEDPOINTKERNEL_H__ */
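
One detail worth noting in this header: run() is a private template on is_bounded_relu, and configure() later stores a pointer to the chosen specialization in _func, so the clamping decision is taken once at configure time rather than per element. A stripped-down sketch of the idiom (illustrative class, not the library API):

    class Kernel
    {
    public:
        void configure(int min, int max)
        {
            // Choose the specialization once; the hot loop then contains no
            // runtime branch on the clamping mode.
            const bool clamp = (min != max);
            _func            = clamp ? &Kernel::run_impl<true> : &Kernel::run_impl<false>;
        }
        void run()
        {
            (this->*_func)();
        }

    private:
        template <bool is_bounded_relu>
        void run_impl()
        {
            // Inside the loop, 'if(is_bounded_relu)' folds away at compile time.
        }

        using FuncPtr = void (Kernel::*)();
        FuncPtr _func = nullptr;
    };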
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
index 77bfb98ba6..5ece753660 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -147,5 +147,63 @@ public:
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
};
+/** Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint on NEON.
+ *
+ * NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint depends on two parameters:
+ *
+ * result_fixedpoint_multiplier, result_shift
+ *
+ * The final result is:
+ *
+ * (FixedPointMul(input[i][k], result_fixedpoint_multiplier) >> result_shift)
+ *
+ * where FixedPointMul(x, y) is the nearest integer to the following
+ * mathematical expression, evaluated without overflow or intermediate rounding:
+ *
+ * (x * y) / 2^31
+ *
+ * For more information: https://github.com/google/gemmlowp/blob/master/public/output_stages.h#L68
+ *
+ * In case the bias tensor is provided, the final result is:
+ *
+ *        (FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier) >> result_shift)
+ *
+ * This function calls the following NEON kernels:
+ *
+ * -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
+ *
+ * @note The function also accepts two optional input arguments (min and max) which can be used to implement a "rectified linear unit" activation function
+ * after the result is shifted right by result_shift
+*/
+class NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint : public INESimpleFunctionNoBorder
+{
+public:
+    /** Initialise the kernel's inputs and output
+ *
+ * @param[in] input Input tensor. Data type supported: S32
+     * @param[in]  bias                         Biases tensor. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
+     *                                          Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[out] output                       Output tensor. Data type supported: QSYMM16
+     * @param[in]  result_fixedpoint_multiplier Fixed-point multiplier applied to each element of the input matrix once the bias (if any) has been added
+ * @param[in] result_shift Number of bits to shift right the result after the fixed point multiplication
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to 0.
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to 0.
+ */
+ void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, int min = 0, int max = 0);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint
+ *
+ * @param[in] input Input tensor info. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+     * @param[in] bias   Biases tensor info. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
+     *                   Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output Output tensor info. Data type supported: QSYMM16
+     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to 0.
+     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to 0.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
+};
} // namespace arm_compute
-#endif /*__ARM_COMPUTE_NEGEMMLOWPOUTPUTSTAGE_H__ */
\ No newline at end of file
+#endif /*__ARM_COMPUTE_NEGEMMLOWPOUTPUTSTAGE_H__ */
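
Typical use of the new output stage mirrors the Configuration test added to tests/validation/NEON/GEMMLowp.cpp further down: configure with an S32 input (and an optional bias), allocate, then run. A sketch with illustrative shapes and parameter values:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void quantize_down_example()
    {
        Tensor acc; // S32 accumulators, e.g. the output of NEGEMMLowpMatrixMultiplyCore
        Tensor out; // QSYMM16 result
        acc.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::S32));
        out.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16));

        NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
        // No bias; multiplier/shift are illustrative; min == max == 0 disables the bounded ReLU
        output_stage.configure(&acc, nullptr, &out, 254601600, 1, 0, 0);

        acc.allocator()->allocate();
        out.allocator()->allocate();

        // ... fill 'acc' with the GEMM accumulators ...
        output_stage.run();
    }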
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp
new file mode 100644
index 0000000000..bc513e6618
--- /dev/null
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NESymm.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(max > 32767);
+ ARM_COMPUTE_RETURN_ERROR_ON(min < -32768 || min > max);
+
+ // Check biases if exist
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
+ }
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QSYMM16);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, input);
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+{
+    // Output auto-initialization if not yet initialized
+ auto_init_if_empty(*output, input->clone()->set_data_type(DataType::QSYMM16));
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input, Steps());
+
+ // NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel doesn't need padding so update_window_and_padding() can be skipped
+ Coordinates coord;
+ coord.set_num_dimensions(output->num_dimensions());
+ output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
+
+ return std::make_pair(Status{}, win);
+}
+} // namespace
+
+class Coordinates;
+
+template <bool is_bounded_relu>
+void NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run(const Window &window)
+{
+ const int16x8_t min_s16 = vdupq_n_s16(static_cast<int16_t>(_min));
+ const int16x8_t max_s16 = vdupq_n_s16(static_cast<int16_t>(_max));
+
+ ARM_COMPUTE_UNUSED(min_s16);
+ ARM_COMPUTE_UNUSED(max_s16);
+
+ const int window_step_x = 8;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
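+    // The collapsed window steps through X one element at a time: the loops
+    // below advance X manually so that the tail after the last full vector of
+    // 8 elements can be handled with scalar code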
+
+ Iterator in(_input, win_collapsed);
+ Iterator out(_output, win_collapsed);
+ if(_bias != nullptr)
+ {
+ Window win_biases;
+ win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator bias(_bias, win_biases);
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+            // Compute 8 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x2_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4)
+ }
+ };
+
+ const int32x4x2_t bias_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 4)
+ }
+ };
+
+ // Add the bias to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+
+ vst1q_s16(reinterpret_cast<int16_t *>(out.ptr()) + x, finalize_quantization_int16<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, min_s16, max_s16));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias.ptr()) + x);
+ int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Add bias
+ in_value += bias_value;
+ // Finalize and store the result
+ *(reinterpret_cast<int16_t *>(out.ptr()) + x) = finalize_quantization_int16<is_bounded_relu>(in_value, _result_fixedpoint_multiplier, _result_shift, static_cast<int16_t>(_min),
+ static_cast<int16_t>(_max));
+ }
+ },
+ in, out, bias);
+ }
+ else
+ {
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+            // Compute 8 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x2_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4)
+ }
+ };
+
+ vst1q_s16(reinterpret_cast<int16_t *>(out.ptr()) + x, finalize_quantization_int16<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, min_s16, max_s16));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+ // Finalize and store the result
+ *(reinterpret_cast<int16_t *>(out.ptr()) + x) = finalize_quantization_int16<is_bounded_relu>(in_value, _result_fixedpoint_multiplier, _result_shift, static_cast<int16_t>(_min),
+ static_cast<int16_t>(_max));
+ }
+ },
+ in, out);
+ }
+}
+
+NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel()
+ : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr), _result_fixedpoint_multiplier(0), _result_shift(0), _min(0), _max(0)
+{
+}
+
+void NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift,
+ int min, int max)
+{
+ // Perform validate step
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), min, max));
+
+ _input = input;
+ _bias = bias;
+ _output = output;
+ _result_fixedpoint_multiplier = result_fixedpoint_multiplier;
+ _result_shift = result_shift;
+ _min = min;
+ _max = max;
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ INEKernel::configure(win_config.second);
+
+ // Check if we need to clamp the result using min and max
+ const bool is_bounded_relu = ((min != max) && !(min == -32768 && max == 32767));
+ _func = is_bounded_relu ? &NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run<true> : &NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run<false>;
+}
+
+Status NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+
+ return Status{};
+}
+
+void NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ (this->*_func)(window);
+}
+} // namespace arm_compute
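
One thing the kernel leaves to the caller is deriving result_fixedpoint_multiplier and result_shift from the floating-point rescale factor (for a symmetric output, roughly input_scale / output_scale). A sketch of the standard gemmlowp-style decomposition, assuming the factor lies in (0, 1) so the shift is non-negative; the library ships its own helper for this, so the function below is illustrative only:

    #include <cmath>
    #include <cstdint>

    // Decompose real_multiplier in (0, 1) as quantized_multiplier * 2^(-shift),
    // with quantized_multiplier a Q31 value in [2^30, 2^31).
    void quantize_multiplier(double real_multiplier, int32_t *quantized_multiplier, int *shift)
    {
        int          exponent    = 0;
        const double significand = std::frexp(real_multiplier, &exponent); // in [0.5, 1)
        *shift                   = -exponent;

        int64_t q = static_cast<int64_t>(std::round(significand * (1ll << 31)));
        if(q == (1ll << 31))
        {
            // Rounding pushed the significand up to 1.0: halve it and shift one bit less
            q /= 2;
            --*shift;
        }
        *quantized_multiplier = static_cast<int32_t>(q);
    }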
diff --git a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
index ce69fa0bfd..b89e7a168e 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,12 +24,13 @@
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
#include "support/ToolchainSupport.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
void NEGEMMLowpQuantizeDownInt32ToUint8Scale::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min, int max)
{
auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel>();
@@ -53,4 +54,17 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ITenso
Status NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
return NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
-}
\ No newline at end of file
+}
+
+void NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, int min, int max)
+{
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>();
+ k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, min, max);
+ _kernel = std::move(k);
+}
+
+Status NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+{
+ return NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::validate(input, bias, output, min, max);
+}
+} // namespace arm_compute
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index f0460b4a23..2f604c95ea 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -294,6 +294,9 @@ const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framewo
using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
+using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
+ GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
+
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
@@ -404,6 +407,117 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedP
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
+
+TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)
+
+const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
+ 2)
+ * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
+
+const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
+ 2)
+ * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+ framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
+ TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
+ TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
+ }),
+ framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
+ TensorInfo(TensorShape(21U), 1, DataType::S32),
+ TensorInfo(TensorShape(20U), 1, DataType::S32),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
+ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
+ TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
+ })),
+ framework::dataset::make("Min",{ -205,
+ -60000,
+ -180,
+ })),
+ framework::dataset::make("Max",{ 205,
+ 60000,
+ 180,
+ })),
+ framework::dataset::make("Expected", { true, false, false })),
+ a_info, b_info, output_info, min, max, expected)
+{
+ // Lock tensors
+ Status status = NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
+ &b_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(false),
+ min,
+ max);
+ ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+ quantize_down_int32_to_int16_scale_by_fixedpoint_cases),
+ shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias)
+{
+ TensorShape shape_bias(shape[0]);
+
+ // Create tensors
+ Tensor in = create_tensor<Tensor>(shape, DataType::S32);
+ Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
+ Tensor out = create_tensor<Tensor>(shape, DataType::QSYMM16);
+
+ ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
+ output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, min, max);
+
+ // Validate valid region input and output
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(in.info()->valid_region(), valid_region);
+ validate(out.info()->valid_region(), valid_region);
+
+ // Validate valid region bias
+ if(add_bias)
+ {
+ const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
+ validate(bias.info()->valid_region(), valid_region_bias);
+ }
+
+ // Validate padding
+ const PaddingSize padding(0);
+ validate(in.info()->padding(), padding);
+ validate(out.info()->padding(), padding);
+
+ if(add_bias)
+ {
+ validate(bias.info()->padding(), padding);
+ }
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+ quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+TEST_SUITE(BoundedReLu)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+ quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // BoundedReLu
+
+TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
+
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index ad5acfc418..8385221c78 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -385,6 +385,98 @@ protected:
SimpleTensor<uint8_t> _reference{};
};
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
+ {
+ _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
+ _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ std::uniform_int_distribution<> distribution(-6000, 6000);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
+ {
+ TensorShape shape_bias(shape[0]);
+
+ // Create tensors
+ TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
+ TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
+ TensorType c = create_tensor<TensorType>(shape, DataType::QSYMM16, 1);
+
+ // Create and configure function
+ FunctionType output_stage;
+ output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ c.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensor
+ fill(AccessorType(a), 0);
+
+ if(add_bias)
+ {
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate bias tensor
+ b.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensor
+ fill(AccessorType(b), 1);
+ }
+
+ // Compute GEMM function
+ output_stage.run();
+ return c;
+ }
+
+ SimpleTensor<int16_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t min, int32_t max,
+ bool add_bias)
+ {
+ // Create reference
+ TensorShape shape_bias(shape[0]);
+
+ SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
+ SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
+
+ // Fill reference
+ fill(a, 0);
+
+ if(add_bias)
+ {
+ // Fill bias
+ fill(b, 1);
+
+ return reference::gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint<int32_t>(a, b, result_fixed_point_multiplier, result_shift, min, max);
+ }
+ else
+ {
+ return reference::gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint<int32_t>(a, result_fixed_point_multiplier, result_shift, min, max);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<int16_t> _reference{};
+};
+
template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
diff --git a/tests/validation/reference/GEMMLowp.cpp b/tests/validation/reference/GEMMLowp.cpp
index 9a7e409e8a..97d05327e7 100644
--- a/tests/validation/reference/GEMMLowp.cpp
+++ b/tests/validation/reference/GEMMLowp.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -95,6 +95,34 @@ void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in,
(*dst)[i] = static_cast<uint8_t>(std::max(0, std::min(255, result)));
}
}
+
+template <typename T>
+void quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<int16_t> *dst, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max)
+{
+ const int cols_in = in->shape().x();
+
+ for(int i = 0; i < in->num_elements(); ++i)
+ {
+ int32_t result = (*in)[i];
+
+ if(bias != nullptr)
+ {
+ result += (*bias)[i % cols_in];
+ }
+
+ // Fixed point multiplication
+ result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, result_fixedpoint_multiplier), result_shift);
+
+ // Bounded ReLu
+ if(min != max)
+ {
+ result = std::max(min, std::min(max, result));
+ }
+
+ (*dst)[i] = static_cast<int16_t>(std::max(-32768, std::min(32767, result)));
+ }
+}
} // namespace
template <typename T_out, typename T_in>
@@ -201,10 +229,36 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(
return dst;
}
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min,
+ int32_t max)
+{
+ SimpleTensor<int16_t> dst(in.shape(), DataType::QSYMM16);
+
+ quantize_down_int32_to_int16_scale_by_fixedpoint<T>(&in, nullptr, &dst, result_fixedpoint_multiplier, result_shift, min, max);
+
+ return dst;
+}
+
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max)
+{
+ SimpleTensor<int16_t> dst(in.shape(), DataType::QSYMM16);
+
+ quantize_down_int32_to_int16_scale_by_fixedpoint<T>(&in, &bias, &dst, result_fixedpoint_multiplier, result_shift, min, max);
+
+ return dst;
+}
+
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
+template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max);
+template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
+ int32_t result_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min,
int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, int32_t result_mult_int,
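
A hand-worked check of the reference path above (no bias, ReLU disabled), with result_fixedpoint_multiplier = 1073741824 (0.5 in Q31) and result_shift = 1, i.e. an overall rescale of 0.25:

    // in                                    = 100
    // asymm_int_mult(100, 1073741824)       = (100 * 2^30 + 2^30) >> 31 = 50
    // asymm_rounding_divide_by_pow2(50, 1)  = 25
    // saturate to [-32768, 32767], cast     = int16_t(25)
    //
    // i.e. 100 * 0.25 = 25, as expected.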
diff --git a/tests/validation/reference/GEMMLowp.h b/tests/validation/reference/GEMMLowp.h
index 4396155b96..5581f67652 100644
--- a/tests/validation/reference/GEMMLowp.h
+++ b/tests/validation/reference/GEMMLowp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,6 +58,13 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(
template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
+
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max);
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier,
+ int32_t result_shift, int32_t min, int32_t max);
} // namespace reference
} // namespace validation
} // namespace test