author    George Wort <george.wort@arm.com>           2019-02-22 16:37:41 +0000
committer Giuseppe Rossini <giuseppe.rossini@arm.com> 2019-03-15 13:34:00 +0000
commit    2d7e683e79c8ad328d4930c1f82a46827313faf4 (patch)
tree      eb81f928ecd2543ef80af87f65d1bdef5a78ea2a
parent    3814b30623d6a9e570d850fe5ae275fe2117f3f5 (diff)
COMPMID-1694: Fuse offset contribution with the output stage when we use NEGEMMLowpMatrixMultiplyCore

Change-Id: Ic1a681e4cc03e1eba3bf8485d9cdb17b3e926047
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/561
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h | 4
-rw-r--r--  arm_compute/core/NEON/NEAsymm.h | 50
-rw-r--r--  arm_compute/core/NEON/NEAsymm.inl | 9
-rw-r--r--  arm_compute/core/NEON/NEKernels.h | 1
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h | 136
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h | 2
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h | 37
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h | 60
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp | 3
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp | 11
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp | 651
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp | 39
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp | 241
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp | 215
-rw-r--r--  tests/datasets/GEMMLowpFusedOffsetOutputDataset.h | 201
-rw-r--r--  tests/validation/CL/GEMMLowp.cpp | 16
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp | 15
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h | 194
18 files changed, 1525 insertions, 360 deletions
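
A minimal per-element sketch of what the fused NEON kernel introduced by this patch computes, based on the offset-contribution and QuantizeDownInt32ToUint8Scale formulas documented in NEGEMMLowpOffsetContributionOutputStageKernel.h below. All names are illustrative; the real kernel vectorises this with NEON and dispatches on template flags.

    #include <algorithm>
    #include <cstdint>

    // One GEMM accumulator goes from S32 to QASYMM8 in a single pass, instead of writing
    // an intermediate S32 tensor and running a separate quantize-down kernel afterwards.
    inline uint8_t fused_offset_contribution_output_stage(int32_t mm_result,        // GEMM accumulator for element (i, k)
                                                          int32_t vector_sum_col_k, // sum of column k of matrix B
                                                          int32_t vector_sum_row_i, // sum of row i of matrix A
                                                          int32_t bias,             // optional shared bias (0 if absent)
                                                          int32_t a_offset, int32_t b_offset, int32_t K,
                                                          int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
    {
        // Offset contribution: mm_result'[i][k]
        int32_t acc = mm_result
                      + a_offset * vector_sum_col_k
                      + b_offset * vector_sum_row_i
                      + a_offset * b_offset * K
                      + bias;

        // QuantizeDownInt32ToUint8Scale output stage (the fixed-point variant replaces the
        // multiply/shift with a saturating rounding doubling multiply-high, see NEAsymm.h).
        acc = ((acc + result_offset) * result_mult_int) >> result_shift;
        return static_cast<uint8_t>(std::max(0, std::min(255, acc)));
    }
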
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
index e6b79176b5..c66a470a75 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,7 +58,7 @@ public:
CLGEMMLowpOffsetContributionKernel &operator=(CLGEMMLowpOffsetContributionKernel &&) = default;
/** Initialise the kernel's input and output.
*
- * @param[in, out] mm_result Input tensor containing the result of @ref CLGEMMLowpMatrixMultiplyKernel
+ * @param[in, out] mm_result Input tensor containing the result of @ref CLGEMMLowpMatrixMultiplyKernel. Data type supported: S32
* @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
* Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
* @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
diff --git a/arm_compute/core/NEON/NEAsymm.h b/arm_compute/core/NEON/NEAsymm.h
index c7f59e9eba..997f28f1f0 100644
--- a/arm_compute/core/NEON/NEAsymm.h
+++ b/arm_compute/core/NEON/NEAsymm.h
@@ -45,6 +45,17 @@ using qasymm8x16_t = uint8x16_t; /**< 8 bit quantized asymmetric vector with 1
*/
int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent);
+/** Round to the nearest division by a power-of-two using exponent
+ *
+ * @note This function calculates the following expression: (x + 2^n -1 ) / 2^n where n = exponent
+ *
+ * @param[in] x Element to divide.
+ * @param[in] exponent Integer value used to round to nearest division by a power-of-two
+ *
+ * @return the nearest division by a power-of-two using exponent
+ */
+int32_t rounding_divide_by_pow2(int32_t x, int exponent);
+
/** Perform a multiply-accumulate on all 16 components of a QASYMM8 vector
*
* vd*vs + vo
@@ -125,6 +136,45 @@ uint8x16_t finalize_quantization(int32x4x4_t &in_s32,
return out_u8;
}
+/** Performs final quantization step on single element
+ *
+ * @tparam is_bounded_relu Specified if a fused bounded relu should be applied
+ *
+ * @param[in] in_value Input to be quantized.
+ * @param[in] result_fixedpoint_multiplier Result multiplier parameter
+ * @param[in] result_shift Result shift parameter
+ * @param[in] result_offset_after_shift_s32 Result offset parameter
+ * @param[in] min_u8 Relu lower bound
+ * @param[in] max_u8 Relu upper bound
+ *
+ * @return Quantized value
+ */
+template <bool is_bounded_relu>
+inline uint8_t finalize_quantization(int32_t in_value, int result_fixedpoint_multiplier,
+ int32_t result_shift, int32_t result_offset_after_shift_s32,
+ uint8_t min_u8, uint8_t max_u8)
+{
+ int32x4_t in_s32 = vdupq_n_s32(in_value);
+
+ // Fixed point multiplication with vector saturating rounding doubling multiply high with scalar
+ in_value = vgetq_lane_s32(vqrdmulhq_n_s32(in_s32, result_fixedpoint_multiplier), 0);
+
+ // Shift value by result_shift_s32
+ in_value = rounding_divide_by_pow2(in_value, result_shift);
+
+ // Add the offset term
+ in_value += result_offset_after_shift_s32;
+
+ // Bound the result
+ uint8_t out_u8 = static_cast<uint8_t>(std::max(0, std::min(255, in_value)));
+ if(is_bounded_relu)
+ {
+ out_u8 = static_cast<uint8_t>(std::max(min_u8, std::min(max_u8, out_u8)));
+ }
+
+ return out_u8;
+}
+
/** Dequantize a neon vector holding 16 quantized values.
*
* @param qv Input values to be dequantized.
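
For readers of the new scalar overload above, this is a hedged plain-integer reference of the same requantization, with the vqrdmulhq_n_s32 step expanded under its usual SQRDMULH semantics (nearest integer to in_value * multiplier / 2^31; the INT32_MIN * INT32_MIN saturation corner case is ignored here).

    #include <algorithm>
    #include <cstdint>

    // Sketch of finalize_quantization<is_bounded_relu>(int32_t, ...) without NEON intrinsics.
    template <bool is_bounded_relu>
    uint8_t finalize_quantization_reference(int32_t in_value, int32_t multiplier, int32_t shift,
                                            int32_t offset_after_shift, uint8_t min_u8, uint8_t max_u8)
    {
        // vqrdmulhq_n_s32: saturating rounding doubling multiply returning the high half,
        // i.e. the nearest integer to in_value * multiplier / 2^31.
        const int64_t prod  = static_cast<int64_t>(in_value) * multiplier;
        const int64_t nudge = (prod >= 0) ? (1LL << 30) : (1 - (1LL << 30));
        int32_t value       = static_cast<int32_t>((prod + nudge) / (1LL << 31));

        // rounding_divide_by_pow2 (scalar overload defined in NEAsymm.inl)
        const int32_t mask      = (1 << shift) - 1;
        const int32_t threshold = (mask >> 1) + (value < 0 ? 1 : 0);
        value = (value >> shift) + (((value & mask) > threshold) ? 1 : 0);

        // Add the offset term and saturate to the uint8 range
        value += offset_after_shift;
        uint8_t out = static_cast<uint8_t>(std::max(0, std::min(255, value)));

        if(is_bounded_relu)
        {
            out = std::max(min_u8, std::min(max_u8, out)); // fused bounded relu
        }
        return out;
    }
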
diff --git a/arm_compute/core/NEON/NEAsymm.inl b/arm_compute/core/NEON/NEAsymm.inl
index ce999a5413..209785d94e 100644
--- a/arm_compute/core/NEON/NEAsymm.inl
+++ b/arm_compute/core/NEON/NEAsymm.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,6 +31,13 @@ inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent)
return vrshlq_s32(fixed_up_x, shift_vec);
}
+inline int32_t rounding_divide_by_pow2(int32_t x, int exponent)
+{
+ const int32_t mask = (1 << exponent) - 1;
+ const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
+ return (x >> exponent) + ((x & mask) > threshold ? 1 : 0);
+}
+
inline qasymm8x16_t vmlaq_qasymm8(qasymm8x16_t vd, float32x4_t vs, float32x4_t vo)
{
// Convert uint8 vectors to uint16 vectors
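
A small host-side check of the scalar overload added above; the expected values assume arithmetic right shift of negative int32_t (as on the targeted AArch32/AArch64 compilers) and show that ties round away from zero.

    #include <cassert>
    #include <cstdint>

    // Local copy of the scalar overload above, so the check builds without arm_neon.h.
    static int32_t rounding_divide_by_pow2_ref(int32_t x, int exponent)
    {
        const int32_t mask      = (1 << exponent) - 1;
        const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
        return (x >> exponent) + ((x & mask) > threshold ? 1 : 0);
    }

    int main()
    {
        assert(rounding_divide_by_pow2_ref(6, 2)  ==  2);  //  6 / 4 = 1.5   -> 2
        assert(rounding_divide_by_pow2_ref(5, 2)  ==  1);  //  5 / 4 = 1.25  -> 1
        assert(rounding_divide_by_pow2_ref(5, 1)  ==  3);  //  5 / 2 = 2.5   -> 3  (tie away from zero)
        assert(rounding_divide_by_pow2_ref(-5, 1) == -3);  // -5 / 2 = -2.5  -> -3 (tie away from zero)
        return 0;
    }
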
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index f1d94c89db..5b1b701a9d 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -73,6 +73,7 @@
#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h
new file mode 100644
index 0000000000..c284ca5c5f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NEON kernel used to add the offset contribution and perform the output stage after @ref NEGEMMLowpMatrixMultiplyKernel.
+ *
+ * The computation is performed in-place
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel),
+ * and adds to it the offset contribution of matrix A and matrix B in-place.
+ *
+ * The output stage can perform either QuantizeDownInt32ToUint8Scale or QuantizeDownInt32ToUint8ScaleByFixedPoint.
+ *
+ * For QuantizeDownInt32ToUint8Scale the final result is:
+ *
+ * ((mm_result'[i][k] + result_offset) * result_mult_int) >> result_shift
+ *
+ * For QuantizeDownInt32ToUint8ScaleByFixedPoint the final result is:
+ *
+ * (FixedPointMul(mm_result'[i][k], result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
+ *
+ * where FixedPointMul(x, y) is the nearest integer to the following
+ * mathematical expression, evaluated without overflow or intermediate rounding:
+ *
+ * (x * y) / 2^31
+ *
+ * and mm_result'[i][k] = mm_result[i][k] +
+ * (vector_sum_col[k] * a_offset) +
+ * (vector_sum_row[i] * b_offset) +
+ * (a_offset * b_offset * k)
+ */
+
+class NEGEMMLowpOffsetContributionOutputStageKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEGEMMLowpOffsetContributionOutputStageKernel";
+ }
+ /** Constructor */
+ NEGEMMLowpOffsetContributionOutputStageKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ NEGEMMLowpOffsetContributionOutputStageKernel(const NEGEMMLowpOffsetContributionOutputStageKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ NEGEMMLowpOffsetContributionOutputStageKernel &operator=(const NEGEMMLowpOffsetContributionOutputStageKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEGEMMLowpOffsetContributionOutputStageKernel(NEGEMMLowpOffsetContributionOutputStageKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEGEMMLowpOffsetContributionOutputStageKernel &operator=(NEGEMMLowpOffsetContributionOutputStageKernel &&) = default;
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] mm_result Input tensor containing the result of @ref NEGEMMLowpMatrixMultiplyKernel. Data type supported: S32
+ * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
+ * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
+ * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
+ * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p mm_result.
+ * @param[out] output Output tensor containing the final quantized result. Data type supported: QASYMM8
+ * @param[in] k Number of matrix A columns or Matrix B rows
+ * @param[in] a_offset Offset to be added to each element of the matrix A.
+ * @param[in] b_offset Offset to be added to each element of the matrix B.
+ * @param[in] output_stage GEMMLowp output stage info, providing the type of quantization and the necessary parameters.
+ */
+ void configure(const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output, int32_t k, int32_t a_offset, int32_t b_offset,
+ GEMMLowpOutputStageInfo output_stage);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpOffsetContributionOutputStageKernel
+ *
+ * @param[in] mm_result Input tensor info containing the result of @ref NEGEMMLowpMatrixMultiplyKernel. Data type supported: S32
+ * @param[in] vector_sum_col Tensor info for the input row-vector of sums of all the entries in each column of matrix B.
+ * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
+ * @param[in] vector_sum_row Tensor info for the input row-vector of sums of all the entries in each row of matrix A.
+ * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
+ * @param[in] bias Biases tensor info. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p mm_result.
+ * @param[in] output Output tensor info containing the final quantized result. Data type supported: QASYMM8
+ * @param[in] a_offset Offset to be added to each element of the matrix A.
+ * @param[in] b_offset Offset to be added to each element of the matrix B.
+ * @param[in] output_stage GEMMLowp output stage info, providing the type of quantization and the necessary parameters.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output, int32_t a_offset,
+ int32_t b_offset,
+ GEMMLowpOutputStageInfo output_stage);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+ using NEGEMMLowpOffsetContributionOutputStageFunction = std::function<void(const Window, const ITensor *, const ITensor *, const ITensor *, const ITensor *,
+ ITensor *, int32_t, int32_t, int32_t, bool, GEMMLowpOutputStageInfo)>;
+
+private:
+ /** Function to use for the particular tensors passed to configure() */
+ NEGEMMLowpOffsetContributionOutputStageFunction _function;
+ const ITensor *_vector_sum_col;
+ const ITensor *_vector_sum_row;
+ const ITensor *_bias;
+ const ITensor *_mm_result;
+ ITensor *_output;
+ int32_t _a_offset;
+ int32_t _b_offset;
+ int32_t _k_offset;
+ bool _slide_vector_sum_col;
+ GEMMLowpOutputStageInfo _output_stage;
+};
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_NEGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H__ */
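
A hedged configuration sketch for the validate() entry point declared above. Tensor shapes, offsets and output-stage parameters are made up for illustration, and the TensorInfo/GEMMLowpOutputStageInfo usage assumes the standard arm_compute types of this release.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // Illustrative shapes: M = 32 rows, N = 64 columns, K = 16 accumulation depth.
    void example_validate()
    {
        const TensorInfo mm_result(TensorShape(64U, 32U), 1, DataType::S32);  // GEMM accumulators
        const TensorInfo vector_sum_col(TensorShape(64U), 1, DataType::S32);  // column sums of B
        const TensorInfo vector_sum_row(TensorShape(32U), 1, DataType::S32);  // row sums of A
        const TensorInfo bias(TensorShape(64U), 1, DataType::S32);            // shared bias [OFM]
        const TensorInfo output(TensorShape(64U, 32U), 1, DataType::QASYMM8); // final quantized result

        GEMMLowpOutputStageInfo output_stage{};
        output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_stage.gemmlowp_offset     = 10;         // result offset after shift
        output_stage.gemmlowp_multiplier = 1073741824; // example fixed-point multiplier (2^30)
        output_stage.gemmlowp_shift      = 5;          // result shift
        output_stage.gemmlowp_min_bound  = 0;          // [0, 255] means no bounded relu
        output_stage.gemmlowp_max_bound  = 255;

        const int32_t a_offset = -3; // negated quantization offset of matrix A
        const int32_t b_offset = -2; // negated quantization offset of matrix B

        const Status status = NEGEMMLowpOffsetContributionOutputStageKernel::validate(
            &mm_result, &vector_sum_col, &vector_sum_row, &bias, &output, a_offset, b_offset, output_stage);
        ARM_COMPUTE_ERROR_THROW_ON(status);
    }
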
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
index 4345ff267b..67b22821da 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
@@ -71,7 +71,7 @@ public:
* This kernel performs the following computations:
*
* -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
- * -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
+ * -# Convert b values from QASYMM8 to int32 and add b_offset to each of them.
* -# Compute the matrix product of the resulting a * b in int32.
* -# Quantize to uint8 if gemm_info.gemmlowp_output_stage != NONE
*
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index 6df7af0d86..ace924f146 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -151,44 +151,51 @@ private:
*
* @param[in] input Input tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
* @param[out] output Output tensor. Data types supported: Same as @p input,
* except for input of QASYMM8 type where output should be of S32 type.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
* @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
*/
- void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, int gemm_3d_depth = 1);
+ void configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo(), int gemm_3d_depth = 1);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMConvolutionLayer matrix multiply routines
*
* @param[in] input Input tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
* @param[in] output Output tensor. Data types supported: Same as @p input,
* except for input of QASYMM8 type where output should be of S32 type.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
* @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
* @param[in] skip_im2col (Optional) Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout. (Default to false)
*
* @return a status
*/
- static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth = 1, bool skip_im2col = false);
+ static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo(),
+ int gemm_3d_depth = 1, bool skip_im2col = false);
/** Static function to check if GEMM3D is supported in @ref NEGEMM or in @ref NEGEMMLowpMatrixMultiplyCore
*
- * @param[in] data_type Input data type
+ * @param[in] input_info Input tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] act_info Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
* @param[in] gemm_3d_depth Depth of GEMM 3D
* @param[in] skip_im2col Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout
*
* @return a status
*/
- static Status validate_gemm3d(DataType data_type, int gemm_3d_depth, bool skip_im2col);
+ static Status validate_gemm3d(const ITensorInfo *input_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col);
private:
- MemoryGroup _memory_group;
- NEConvolutionLayerReshapeWeights _reshape_weights;
- NEIm2ColKernel _im2col_kernel;
- NEGEMM _mm_gemm;
- NEGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
- NECol2ImKernel _col2im_kernel;
- NEActivationLayer _activationlayer_function;
- NEArithmeticAdditionKernel _add_bias_kernel;
- NEReshapeLayer _reshape_layer;
+ MemoryGroup _memory_group;
+ NEConvolutionLayerReshapeWeights _reshape_weights;
+ NEIm2ColKernel _im2col_kernel;
+ NEGEMM _mm_gemm;
+ NEGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
+ NECol2ImKernel _col2im_kernel;
+ NEActivationLayer _activationlayer_function;
+ NEArithmeticAdditionKernel _add_bias_kernel;
+ NEReshapeLayer _reshape_layer;
const ITensor *_original_weights;
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 682475c824..d3b27e4faf 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,7 @@
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
@@ -73,20 +74,24 @@ public:
* -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
* -# Compute the matrix product of the resulting a * b in int32.
*
+ * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8 otherwise
+ *
* @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
* @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
* @param[in] c Third input tensor (Matrix C). It can be a nullptr. Data type supported: S32
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
+ * @param[out] output Output tensor. Data type supported: Data type supported: S32/QASYMM8
* @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
* if the reshape of matrix B should be executed only for the first run
*/
void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info = GEMMInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
*
- * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[in] c Third input tensor (Matrix C). It can be a nullptr. Data type supported: S32
- * @param[in] output Output tensor. Data type supported: Data type supported: S32
+ * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8 otherwise
+ *
+ * @param[in] a First input tensor info (Matrix A). Data type supported: QASYMM8.
+ * @param[in] b Second input tensor info (Matrix B). Data type supported: same as @p a
+ * @param[in] c Third input tensor info (Matrix C). It can be a nullptr. Data type supported: S32
+ * @param[in] output Output tensor info. Data type supported: Data type supported: S32/QASYMM8
* @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
* if the reshape of matrix B should be executed only for the first run
*
@@ -99,25 +104,28 @@ public:
void prepare() override;
private:
- MemoryGroup _memory_group;
- NEGEMMAssemblyDispatch _asm_glue;
- std::unique_ptr<INEKernel> _mm_kernel;
- std::unique_ptr<INEKernel> _mtx_a_reshape_kernel;
- std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
- NEGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel;
- NEGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel;
- NEGEMMLowpOffsetContributionKernel _offset_contribution_kernel;
- Tensor _vector_sum_col;
- Tensor _vector_sum_row;
- Tensor _tmp_a;
- Tensor _tmp_b;
- const ITensor *_original_b;
- int32_t _a_offset;
- int32_t _b_offset;
- bool _run_vector_matrix_multiplication;
- bool _dot_product_path;
- bool _reshape_b_only_on_first_run;
- bool _is_prepared;
+ MemoryGroup _memory_group;
+ NEGEMMAssemblyDispatch _asm_glue;
+ std::unique_ptr<INEKernel> _mm_kernel;
+ std::unique_ptr<INEKernel> _mtx_a_reshape_kernel;
+ std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
+ NEGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel;
+ NEGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel;
+ NEGEMMLowpOffsetContributionKernel _offset_contribution_kernel;
+ NEGEMMLowpOffsetContributionOutputStageKernel _offset_contribution_output_stage_kernel;
+ Tensor _vector_sum_col;
+ Tensor _vector_sum_row;
+ Tensor _tmp_a;
+ Tensor _tmp_b;
+ Tensor _mm_result_s32;
+ const ITensor *_original_b;
+ int32_t _a_offset;
+ int32_t _b_offset;
+ bool _run_vector_matrix_multiplication;
+ bool _dot_product_path;
+ bool _reshape_b_only_on_first_run;
+ bool _is_prepared;
+ bool _fuse_output_stage;
};
-}
+} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__ */
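
The @note added above ties the core's output type to the GEMMLowp output stage carried by GEMMInfo; a small sketch of that rule follows (helper name is illustrative, and the comment on the fused path reflects the new members declared above).

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // With GEMMLowpOutputStageType::NONE the core still writes raw S32 accumulators
    // (legacy path: NEGEMMLowpOffsetContributionKernel runs in-place on the output).
    // With any other stage type, _offset_contribution_output_stage_kernel and the internal
    // _mm_result_s32 tensor are expected to take over and the function writes QASYMM8 directly.
    inline TensorInfo expected_core_output_info(const TensorShape &shape, GEMMLowpOutputStageType output_stage_type)
    {
        const DataType dt = (output_stage_type == GEMMLowpOutputStageType::NONE) ? DataType::S32 : DataType::QASYMM8;
        return TensorInfo(shape, 1, dt);
    }
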
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
index 83af0c63eb..8fba342e74 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,7 +51,6 @@ Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vecto
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type == GEMMLowpOutputStageType::NONE);
- ARM_COMPUTE_RETURN_ERROR_ON(bias == nullptr && a_offset == 0 && b_offset == 0);
ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > 255);
ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < 0 || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
index 33a5b4ace3..22939266e5 100644
--- a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -106,20 +106,17 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result,
Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal mm_result_access(mm_result, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win,
- mm_result_access);
+ window_changed = window_changed || update_window_and_padding(win, mm_result_access);
if(a_offset != 0)
{
AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win,
- vector_sum_col_access);
+ window_changed = window_changed || update_window_and_padding(win, vector_sum_col_access);
}
if(b_offset != 0)
{
AccessWindowStatic vector_sum_row_access(vector_sum_row, 0, 0, vector_sum_row->dimension(0), 0); // NOLINT
- window_changed = window_changed || update_window_and_padding(win,
- vector_sum_row_access);
+ window_changed = window_changed || update_window_and_padding(win, vector_sum_row_access);
}
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
new file mode 100644
index 0000000000..ebbea083e3
--- /dev/null
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
@@ -0,0 +1,651 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NEAsymm.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+
+namespace arm_compute
+{
+class Coordinates;
+
+namespace
+{
+inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
+{
+ return
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
+ }
+ };
+}
+
+inline int32x4x4_t load(const int32_t *ptr, int32_t x)
+{
+ return
+ {
+ {
+ vld1q_s32(ptr + x + 0),
+ vld1q_s32(ptr + x + 4),
+ vld1q_s32(ptr + x + 8),
+ vld1q_s32(ptr + x + 12)
+ }
+ };
+}
+
+inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
+{
+ int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);
+
+ a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
+ a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
+ a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
+ a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
+ return a_offset_term_s32;
+}
+
+inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
+{
+ int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
+ b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, b_offset);
+ return b_offset_term_s32;
+}
+
+inline int32x4x4_t get_k_offset(int32_t k_offset)
+{
+ return
+ {
+ {
+ vdupq_n_s32(k_offset),
+ vdupq_n_s32(k_offset),
+ vdupq_n_s32(k_offset),
+ vdupq_n_s32(k_offset)
+ }
+ };
+}
+
+template <bool is_bounded_relu>
+inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8)
+{
+ const static int32x4_t zero_s32 = vdupq_n_s32(0);
+
+ // Shift final result (negative value shift right)
+ in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
+ in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
+ in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
+ in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
+
+ // Saturate negative values
+ in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
+ in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
+ in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
+ in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
+
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
+ }
+ };
+
+ // Convert S16 to U8
+ uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));
+
+ if(is_bounded_relu)
+ {
+ out_u8 = vmaxq_u8(out_u8, min_u8);
+ out_u8 = vminq_u8(out_u8, max_u8);
+ }
+
+ return out_u8;
+}
+
+inline Window get_win_vector_sum(const Window &window)
+{
+ Window win_vector_sum(window);
+ win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
+ return win_vector_sum;
+}
+
+inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
+{
+ Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
+ return vector_sum_col_it;
+}
+
+inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
+{
+ Window win_vector_sum_row = get_win_vector_sum(window);
+ win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+ Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
+ return vector_sum_row_it;
+}
+
+inline Iterator get_bias_it(const Window &window, const ITensor *bias)
+{
+ Window win_bias(window);
+ win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
+ win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
+ Iterator bias_it(bias, win_bias);
+ return bias_it;
+}
+
+inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
+{
+ return
+ {
+ {
+ vaddq_s32(a.val[0], b),
+ vaddq_s32(a.val[1], b),
+ vaddq_s32(a.val[2], b),
+ vaddq_s32(a.val[3], b)
+ }
+ };
+}
+
+inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
+{
+ return
+ {
+ {
+ vaddq_s32(a.val[0], b.val[0]),
+ vaddq_s32(a.val[1], b.val[1]),
+ vaddq_s32(a.val[2], b.val[2]),
+ vaddq_s32(a.val[3], b.val[3])
+ }
+ };
+}
+
+inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
+{
+ return
+ {
+ {
+ vmulq_n_s32(a.val[0], mul_scalar),
+ vmulq_n_s32(a.val[1], mul_scalar),
+ vmulq_n_s32(a.val[2], mul_scalar),
+ vmulq_n_s32(a.val[3], mul_scalar)
+ }
+ };
+}
+
+template <bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point>
+inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
+ const int32x4_t result_offset_s32, const int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8,
+ int32_t a_offset, int32_t b_offset, int32_t k_offset,
+ GEMMLowpOutputStageInfo output_stage, int window_step_x, int window_start_x, int window_end_x)
+{
+ int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
+ if(!is_fixed_point)
+ {
+ // Combine quantization offset with other offsets.
+ offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
+ }
+ if(has_a_offset && has_b_offset)
+ {
+ offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
+ }
+ if(has_b_offset)
+ {
+ offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
+ }
+
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x4_t in_s32 = load_results_input(mm_result_it, x);
+
+ if(has_a_offset)
+ {
+ in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
+ }
+ if(has_bias)
+ {
+ in_s32 = add_s32(in_s32, load(bias_ptr, x));
+ }
+ if(!is_fixed_point || has_b_offset)
+ {
+ in_s32 = add_s32(in_s32, offset_term_s32);
+ }
+ if(!is_fixed_point)
+ {
+ in_s32 = mul_s32(in_s32, output_stage.gemmlowp_multiplier);
+ }
+
+ if(is_fixed_point)
+ {
+ vst1q_u8(out_it.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift, result_offset_s32, min_u8, max_u8));
+ }
+ else
+ {
+ vst1q_u8(out_it.ptr() + x, finalize_quantization_floating_point<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8));
+ }
+ }
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);
+
+ if(has_a_offset)
+ {
+ in_value += (*(vector_sum_col_ptr + x) * a_offset);
+ }
+ if(has_bias)
+ {
+ in_value += *(bias_ptr + x);
+ }
+
+ if(is_fixed_point)
+ {
+ // Finalize and store the result
+ *(out_it.ptr() + x) = finalize_quantization<is_bounded_relu>(in_value, output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift,
+ output_stage.gemmlowp_offset, static_cast<uint8_t>(output_stage.gemmlowp_min_bound), static_cast<uint8_t>(output_stage.gemmlowp_max_bound));
+ }
+ else
+ {
+ // Finalize quantization
+ in_value = (in_value * output_stage.gemmlowp_multiplier) >> output_stage.gemmlowp_shift;
+
+ // Bound and store the result
+ if(is_bounded_relu)
+ {
+ in_value = static_cast<uint8_t>(std::max(output_stage.gemmlowp_min_bound, std::min(output_stage.gemmlowp_max_bound, in_value)));
+ }
+ *(out_it.ptr() + x) = static_cast<uint8_t>(std::max(0, std::min(255, in_value)));
+ }
+ }
+}
+
+template <bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point>
+void run_offset_contribution_output_stage(const Window &window,
+ const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
+ int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
+ GEMMLowpOutputStageInfo output_stage)
+{
+ const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
+ const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;
+
+ const int32x4_t result_offset_s32 = vdupq_n_s32(output_stage.gemmlowp_offset);
+ const int32x4_t result_shift_s32 = vdupq_n_s32(is_fixed_point ? output_stage.gemmlowp_shift : -output_stage.gemmlowp_shift);
+ const uint8x16_t min_u8 = vdupq_n_u8(static_cast<uint8_t>(output_stage.gemmlowp_min_bound));
+ const uint8x16_t max_u8 = vdupq_n_u8(static_cast<uint8_t>(output_stage.gemmlowp_max_bound));
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win(window);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);
+
+ Iterator mm_result_it(mm_result, win);
+ Iterator out_it(output, win);
+
+ if((a_offset != 0) && (b_offset != 0))
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
+
+ Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
+ Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);
+
+ const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
+
+ // Offset in case vector_sum_col is batched
+ const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
+
+ if(bias != nullptr)
+ {
+ Iterator bias_it = get_bias_it(collapsed_window, bias);
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
+ const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
+ + id.y() + (id.z() % depth_input) * height_input;
+ run_offset_contribution_output_stage_window<true, true, true, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
+ out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
+ const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
+ + id.y() + (id.z() % depth_input) * height_input;
+ run_offset_contribution_output_stage_window<true, true, false, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
+ }
+ }
+ else if((a_offset == 0) && (b_offset != 0))
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
+
+ Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);
+
+ const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
+
+ if(bias != nullptr)
+ {
+ Iterator bias_it = get_bias_it(collapsed_window, bias);
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
+ + id.y() + (id.z() % depth_input) * height_input;
+ run_offset_contribution_output_stage_window<false, true, true, is_bounded_relu, is_fixed_point>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ vector_sum_row_it, bias_it, mm_result_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
+ + id.y() + (id.z() % depth_input) * height_input;
+ run_offset_contribution_output_stage_window<false, true, false, is_bounded_relu, is_fixed_point>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ vector_sum_row_it, mm_result_it, out_it);
+ }
+ }
+ else if((a_offset != 0) && (b_offset == 0))
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
+
+ Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
+
+ // Offset in case vector_sum_col is batched
+ const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
+
+ if(bias != nullptr)
+ {
+ Iterator bias_it = get_bias_it(collapsed_window, bias);
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
+ run_offset_contribution_output_stage_window<true, false, true, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ vector_sum_col_it, bias_it, mm_result_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
+ run_offset_contribution_output_stage_window<true, false, false, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ vector_sum_col_it, mm_result_it, out_it);
+ }
+ }
+ else
+ {
+ if(bias != nullptr)
+ {
+ Iterator bias_it = get_bias_it(collapsed_window, bias);
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ run_offset_contribution_output_stage_window<false, false, true, is_bounded_relu, is_fixed_point>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ bias_it, mm_result_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ run_offset_contribution_output_stage_window<false, false, false, is_bounded_relu, is_fixed_point>(nullptr, nullptr, nullptr, mm_result_it, out_it,
+ result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
+ output_stage, window_step_x, window_start_x, window_end_x);
+ },
+ mm_result_it, out_it);
+ }
+ return;
+ }
+}
+
+Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
+ int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > 255);
+ ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < 0 || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
+ ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN && output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);
+
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
+ }
+
+ // If a_offset == 0, vector_sum_col can be a nullptr
+ if(a_offset != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
+ }
+
+ // If b_offset == 0, vector_sum_row can be a nullptr
+ if(b_offset != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
+
+ // Check if input is a 3D reinterpretation
+ const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
+
+ // Validate input
+ ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
+ ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));
+
+ TensorShape output_shape = output->tensor_shape();
+ if(output_shape.num_dimensions() > 1)
+ {
+ const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;
+
+ TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
+ vector_sum_row_shape.collapse_from(1);
+ output_shape.collapse_from(output_batch_idx);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
+ "mm_result tensor must have the same number of batches of output tensor");
+
+ if(a_offset != 0)
+ {
+ TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
+ vector_sum_col_shape.collapse_from(1);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
+ "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
+ }
+ }
+ }
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
+{
+ // Output auto inizialitation if not yet initialized
+ auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));
+
+ // Configure kernel window
+ Window win = calculate_max_window(*mm_result, Steps());
+
+ // Note: This kernel performs 16 elements per iteration.
+ // However, since we use a left-over for loop, we cannot have any read or write out of memory
+ // For this reason num_elems_processed_per_iteration is 1 and so update_window_and_padding() can be skipped
+ Coordinates coord;
+ coord.set_num_dimensions(output->num_dimensions());
+ output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
+
+ return std::make_pair(Status{}, win);
+}
+
+NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction
+get_configured_function(const ITensor *mm_result, const ITensor *vector_sum_row, GEMMLowpOutputStageInfo output_stage)
+{
+ static std::map<uint8_t, NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction> map_function =
+ {
+ { 0, &run_offset_contribution_output_stage<false, false, false> },
+ { 1, &run_offset_contribution_output_stage<true, false, false> },
+ { 2, &run_offset_contribution_output_stage<false, true, false> },
+ { 3, &run_offset_contribution_output_stage<true, true, false> },
+ { 4, &run_offset_contribution_output_stage<false, false, true> },
+ { 5, &run_offset_contribution_output_stage<true, false, true> },
+ { 6, &run_offset_contribution_output_stage<false, true, true> },
+ { 7, &run_offset_contribution_output_stage<true, true, true> }
+ };
+
+ // Check if input is a 3D reinterpretation
+ const bool reinterpret_as_3d = vector_sum_row != nullptr
+ && mm_result->info()->num_dimensions() > 1
+ && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();
+
+ // Check if we need to clamp the result using min and max
+ const bool is_bounded_relu = ((output_stage.gemmlowp_min_bound != output_stage.gemmlowp_max_bound)
+ && !(output_stage.gemmlowp_min_bound == 0 && output_stage.gemmlowp_max_bound == 255));
+
+ const bool is_fixed_point = output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;
+
+ // key acts as a bitset, setting the first bit on reinterpret_as_3d,
+ // the second on is_bounded_relu, and the third on is_fixed_point.
+ uint8_t key = (reinterpret_as_3d ? 1UL : 0UL) | ((is_bounded_relu ? 1UL : 0UL) << 1) | ((is_fixed_point ? 1UL : 0UL) << 2);
+ return map_function.find(key)->second;
+}
+} // namespace
+
+NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
+ : _function(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
+ _output_stage(GEMMLowpOutputStageInfo())
+
+{
+}
+
+void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col,
+ const ITensor *vector_sum_row, const ITensor *bias, ITensor *output, int32_t k,
+ int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
+{
+ // Perform validate step
+ ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
+ vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
+ vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
+ bias != nullptr ? bias->info() : nullptr, // NOLINT
+ output->info(), a_offset, b_offset, output_stage)); // NOLINT
+
+ _vector_sum_col = vector_sum_col;
+ _vector_sum_row = vector_sum_row;
+ _bias = bias;
+ _mm_result = mm_result;
+ _output = output;
+ _a_offset = a_offset;
+ _b_offset = b_offset;
+ _k_offset = a_offset * b_offset * k;
+ _output_stage = output_stage;
+
+ // If a_offset == 0, vector_sum_col can be a nullptr
+ if(a_offset != 0)
+ {
+ // Check if vector_sum_col_shape should be slidden or not
+ // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1
+ // This scenario can happen when the the matrix multiplication is used to perform a convolution operation
+ _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
+ }
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(mm_result->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ INEKernel::configure(win_config.second);
+
+ _function = get_configured_function(mm_result, vector_sum_row, output_stage);
+}
+
+Status NEGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
+ const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
+ int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(), output->clone().get()).first);
+ return Status{};
+}
+
+void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ _function(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage);
+}
+
+} // namespace arm_compute
\ No newline at end of file
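
The kernel picks its NEON worker through the 3-bit key built in get_configured_function() above; a standalone illustration of that mapping, with example flag values:

    #include <cstdint>
    #include <iostream>

    // bit 0 -> reinterpret_as_3d, bit 1 -> is_bounded_relu, bit 2 -> is_fixed_point.
    int main()
    {
        const bool reinterpret_as_3d = false; // plain 2D GEMM output
        const bool is_bounded_relu   = true;  // gemmlowp_min/max_bound narrower than [0, 255]
        const bool is_fixed_point    = true;  // GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT

        const uint8_t key = (reinterpret_as_3d ? 1U : 0U)
                          | ((is_bounded_relu ? 1U : 0U) << 1)
                          | ((is_fixed_point ? 1U : 0U) << 2);

        // key == 6 -> run_offset_contribution_output_stage<false, true, true>
        std::cout << "dispatch key: " << static_cast<int>(key) << std::endl;
        return 0;
    }
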
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index f0ac695b20..d3cfc7a8fa 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -86,37 +86,6 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
namespace arm_compute
{
class Coordinates;
-
-/* Function used by the left-over for loop to perform the quantization */
-template <bool is_bounded_relu>
-inline uint8_t finalize_quantization(int32x4_t in_s32, int result_fixedpoint_multiplier, int32_t result_shift, int32x4_t result_offset_after_shift_s32, uint8_t min_u8, uint8_t max_u8)
-{
- const static int32x4_t zero_s32 = vdupq_n_s32(0);
- const static int32x4_t sat_value_s32 = vdupq_n_s32(255);
-
- // Fixed point multiplication with vector saturating rounding doubling multiply high with scalar
- in_s32 = vqrdmulhq_n_s32(in_s32, result_fixedpoint_multiplier);
-
- // Round to the nearest division by a power-of-two using result_shift_s32
- in_s32 = rounding_divide_by_pow2(in_s32, result_shift);
-
- // Add the offset terms
- in_s32 = vaddq_s32(in_s32, result_offset_after_shift_s32);
-
- // Saturate negative values
- in_s32 = vmaxq_s32(in_s32, zero_s32);
- in_s32 = vminq_s32(in_s32, sat_value_s32);
-
- auto out_u8 = static_cast<uint8_t>(vgetq_lane_s32(in_s32, 0));
-
- if(is_bounded_relu)
- {
- out_u8 = std::max(out_u8, min_u8);
- out_u8 = std::min(out_u8, max_u8);
- }
-
- return out_u8;
-}
} // namespace arm_compute
template <bool is_bounded_relu>
@@ -188,10 +157,8 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window
// Add bias
in_value += bias_value;
-
// Finalize and store the result
- *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(vdupq_n_s32(in_value), _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min),
- static_cast<uint8_t>(_max));
+ *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(in_value, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max));
}
},
in, out, bias);
@@ -220,10 +187,10 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- const int32x4_t in_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+ const int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
// Finalize and store the result
- *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max));
+ *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(in_value, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max));
}
},
in, out);
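
The duplicated helper removed above is replaced at the call sites by the shared scalar finalize_quantization overload. A simplified scalar model of what that quantize-down step computes per value is sketched below (an approximation under stated assumptions: the real code uses NEON intrinsics, and the saturation corner case plus the exact negative-value rounding of rounding_divide_by_pow2 are omitted).

#include <algorithm>
#include <cstdint>

inline uint8_t quantize_down_fixedpoint(int32_t in, int32_t multiplier, int32_t shift,
                                        int32_t offset_after_shift, uint8_t min_u8, uint8_t max_u8,
                                        bool bounded_relu)
{
    // Rounding doubling multiply-high (scalar counterpart of vqrdmulhq_n_s32)
    const int64_t prod = 2 * static_cast<int64_t>(in) * static_cast<int64_t>(multiplier);
    int32_t       res  = static_cast<int32_t>((prod + (INT64_C(1) << 31)) >> 32);

    // Round to nearest on the division by 2^shift
    if(shift > 0)
    {
        res = (res + (1 << (shift - 1))) >> shift;
    }

    // Add the offset term and saturate to the uint8 range
    res += offset_after_shift;
    res = std::min(std::max(res, 0), 255);

    // Optional bounded ReLU clamp
    if(bounded_relu)
    {
        res = std::min(std::max(res, static_cast<int32_t>(min_u8)), static_cast<int32_t>(max_u8));
    }
    return static_cast<uint8_t>(res);
}
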
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index be7cc2d0e1..b6c37349c1 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -90,16 +90,17 @@ void NEConvolutionLayerReshapeWeights::run()
}
NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
- : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
- _add_bias_kernel(), _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false),
- _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
+ : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(), _activationlayer_function(), _add_bias_kernel(),
+ _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false), _skip_im2col(false),
+ _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
{
}
-void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, int gemm_3d_depth)
+void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act_info, int gemm_3d_depth)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
- ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), output->info(), gemm_3d_depth, _skip_im2col));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output == nullptr ? nullptr : output->info(), act_info, gemm_3d_depth,
+ _skip_im2col));
const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
@@ -114,7 +115,40 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
- _mm_gemmlowp.configure(input, weights, nullptr, output, gemm_info);
+ const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quantization_info : output->info()->quantization_info();
+
+ float multiplier = input_quantization_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale;
+ int output_multiplier, output_shift;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+ // Merge activation with output stage
+ int min_activation = 0;
+ int max_activation = 0;
+
+ const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+ ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+ };
+ if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
+ {
+ const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
+ const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+
+ min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
+ max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
+
+ _is_activationlayer_enabled = false;
+ }
+
+ GEMMLowpOutputStageInfo output_info;
+ output_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ output_info.gemmlowp_offset = output_quant_info.offset;
+ output_info.gemmlowp_multiplier = output_multiplier;
+ output_info.gemmlowp_shift = output_shift;
+ output_info.gemmlowp_min_bound = min_activation;
+ output_info.gemmlowp_max_bound = max_activation;
+
+ _mm_gemmlowp.configure(input, weights, biases, output, GEMMInfo(false, false, true, gemm_3d_depth, _skip_im2col, false, output_info));
    // Revert back QuantizationInfo as input and weights could be used in other convolution layers
input->info()->set_quantization_info(input_quantization_info);
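
For context, calculate_quantized_multiplier_less_than_one expresses the real rescale factor input_scale * weights_scale / output_scale as a Q31 fixed-point multiplier plus a right shift, which is what the QUANTIZE_DOWN_FIXEDPOINT stage consumes. A conceptual sketch (an assumption, not necessarily the library's exact implementation) is:

#include <cmath>
#include <cstdint>

void quantize_multiplier_less_than_one(float multiplier, int32_t *quantized_multiplier, int32_t *right_shift)
{
    if(multiplier == 0.f)
    {
        *quantized_multiplier = 0;
        *right_shift          = 0;
        return;
    }
    // multiplier = q * 2^exponent with q in [0.5, 1); multiplier < 1 implies exponent <= 0
    int          exponent = 0;
    const double q        = std::frexp(multiplier, &exponent);
    *right_shift          = -exponent;

    // Represent q in Q31; handle the case where rounding pushes it up to 1.0
    int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
    if(q_fixed == (1ll << 31))
    {
        q_fixed /= 2;
        --(*right_shift);
    }
    *quantized_multiplier = static_cast<int32_t>(q_fixed);
}
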
@@ -127,9 +161,11 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
}
}
-Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth, bool skip_im2col)
+Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const ActivationLayerInfo &act_info,
+ int gemm_3d_depth, bool skip_im2col)
{
- const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
+ const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
+ const bool is_activation_enabled = act_info.enabled();
const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
@@ -145,8 +181,39 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+ const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quantization_info : output->quantization_info();
+
+ float multiplier = input_quantization_info.scale * weights->quantization_info().scale / output_quant_info.scale;
+ int output_multiplier, output_shift;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+ // Merge activation with output stage
+ int min_activation = 0;
+ int max_activation = 0;
+
+ const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+ ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+ };
+ if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
+ {
+ const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
+ const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+
+ min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
+ max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
+ }
+
+ GEMMLowpOutputStageInfo output_info;
+ output_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ output_info.gemmlowp_offset = output_quant_info.offset;
+ output_info.gemmlowp_multiplier = output_multiplier;
+ output_info.gemmlowp_shift = output_shift;
+ output_info.gemmlowp_min_bound = min_activation;
+ output_info.gemmlowp_max_bound = max_activation;
+
// Perform validation step on GEMMLowp
- return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), nullptr, output, gemm_info);
+ return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, GEMMInfo(false, false, true, gemm_3d_depth, skip_im2col, false, output_info));
}
else
{
@@ -155,19 +222,18 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
}
}
-Status NEGEMMConvolutionLayer::validate_gemm3d(DataType data_type, int gemm_3d_depth, bool skip_im2col)
+Status NEGEMMConvolutionLayer::validate_gemm3d(const ITensorInfo *input_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
{
- const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
- const DataType output_gemm_data_type = is_quantized ? DataType::S32 : data_type;
- const unsigned int mult_y = skip_im2col ? 1U : gemm_3d_depth;
- const unsigned int mult_z = skip_im2col ? gemm_3d_depth : 1U;
+ const DataType data_type = input_info->data_type();
+ const unsigned int mult_y = skip_im2col ? 1U : gemm_3d_depth;
+ const unsigned int mult_z = skip_im2col ? gemm_3d_depth : 1U;
// Set dummy tensor shapes for the validation
- const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type);
+ const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type, input_info->quantization_info());
const TensorInfo dummy_weights_info(TensorShape(4U, 4U), 1, data_type);
- const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, output_gemm_data_type);
+ const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, data_type, input_info->quantization_info());
- return validate_mm(&dummy_input_info, &dummy_weights_info, &dummy_output_info, gemm_3d_depth, skip_im2col);
+ return validate_mm(&dummy_input_info, &dummy_weights_info, nullptr, &dummy_output_info, act_info, gemm_3d_depth, skip_im2col);
}
void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
@@ -202,9 +268,8 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
_append_bias = (biases != nullptr) && (!_is_quantized);
_is_activationlayer_enabled = act_info.enabled();
- const ITensor *gemm_input_to_use = input;
- ITensor *gemm_output_to_use = output;
- ITensor *gemm_output_staged_to_use = output;
+ const ITensor *gemm_input_to_use = input;
+ ITensor *gemm_output_to_use = output;
// Get convolved dimensions
unsigned int conv_w = 0;
@@ -219,7 +284,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
// Check if GEMM3D is supported
if(data_layout == DataLayout::NHWC)
{
- _skip_col2im = bool(validate_gemm3d(input->info()->data_type(), conv_h, true));
+ _skip_col2im = bool(validate_gemm3d(input->info(), act_info, conv_h, true));
// If not supported, we need to perform im2col and col2im (or reshape layer)
if(!_skip_col2im)
{
@@ -262,26 +327,17 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
}
// Create temporary GEMM output tensor in case we cannot skip col2im
- if(!_skip_col2im || _is_quantized)
+ if(!_skip_col2im)
{
- // GEMM output should be S32 for acquiring raw integer accumulator without quantized postprocessing for quantized asymmetric input.
- const DataType gemm_data_type = _is_quantized ? DataType::S32 : data_type;
- TensorShape shape_gemm;
+ TensorShape shape_gemm;
- if(_is_quantized && _skip_col2im)
- {
- shape_gemm = output->info()->tensor_shape();
- }
- else
- {
- // Calculate GEMM output shape
- shape_gemm = _im2col_output.info()->tensor_shape();
- shape_gemm.set(0, mat_weights_cols);
- shape_gemm.set(1, conv_w * conv_h);
- }
+ // Calculate GEMM output shape
+ shape_gemm = _im2col_output.info()->tensor_shape();
+ shape_gemm.set(0, mat_weights_cols);
+ shape_gemm.set(1, conv_w * conv_h);
// FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
- TensorInfo info_gemm(shape_gemm, 1, gemm_data_type);
+ TensorInfo info_gemm(shape_gemm, 1, data_type);
info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
_gemm_output.allocator()->init(info_gemm);
_memory_group.manage(&_gemm_output);
@@ -293,62 +349,24 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
// Configure GEMM
// In case we need to skip col2im, GEMM3D (gemm_3d_depth != 0) must be called in order to avoid reshaping the output matrix
const unsigned int gemm_3d_depth = _skip_col2im ? conv_h : 0;
- configure_mm(gemm_input_to_use, &_weights_reshaped, gemm_output_to_use, gemm_3d_depth);
+ configure_mm(gemm_input_to_use, &_weights_reshaped, biases, gemm_output_to_use, act_info, gemm_3d_depth);
if(!_skip_im2col)
{
_im2col_output.allocator()->allocate();
}
- // Configure output stage for quantized case
- if(_is_quantized)
- {
- const QuantizationInfo input_quant_info = input->info()->quantization_info();
- const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quant_info : output->info()->quantization_info();
-
- float multiplier = input_quant_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
- quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
- if(!_skip_col2im)
- {
- _memory_group.manage(&_tmp_output);
- gemm_output_staged_to_use = &_tmp_output;
- }
-
- // Merge activation with output stage
- int min_activation = 0;
- int max_activation = 0;
-
- const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
- ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
- ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
- };
- if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
- {
- const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
- const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
-
- min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
- max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
-
- _is_activationlayer_enabled = false;
- }
-
- _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, output_multiplier, output_shift, output_quant_info.offset, min_activation, max_activation);
- }
-
if(!_skip_col2im)
{
if(_data_layout == DataLayout::NCHW)
{
// Configure col2im
- _col2im_kernel.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output, Size2D(conv_w, conv_h));
+ _col2im_kernel.configure(gemm_output_to_use, output, Size2D(conv_w, conv_h));
}
else
{
// Configure reshape layer
- _reshape_layer.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output);
+ _reshape_layer.configure(gemm_output_to_use, output);
}
}
@@ -395,10 +413,9 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
const unsigned int kernel_height = weights->dimension(idx_height);
TensorInfo im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
- const ITensorInfo *gemm_input_to_use = input;
- const ITensorInfo *gemm_output_to_use = output;
- const ITensorInfo *gemm_output_staged_to_use = output;
- const ITensorInfo *weights_to_use = weights;
+ const ITensorInfo *gemm_input_to_use = input;
+ const ITensorInfo *gemm_output_to_use = output;
+ const ITensorInfo *weights_to_use = weights;
const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
const bool append_bias = (biases != nullptr) && (!is_quantized);
@@ -420,7 +437,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
bool skip_col2im = false;
if(data_layout == DataLayout::NHWC)
{
- skip_col2im = bool(validate_gemm3d(input->data_type(), conv_h, true));
+ skip_col2im = bool(validate_gemm3d(input, act_info, conv_h, true));
// If not supported, we need to perform im2col and col2im (or reshape layer)
if(!skip_col2im)
{
@@ -431,7 +448,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
if(skip_col2im)
{
// If not supported, we need to perform im2col and col2im (or reshape layer)
- if(!bool(validate_gemm3d(input->data_type(), conv_h, skip_im2col)))
+ if(!bool(validate_gemm3d(input, act_info, conv_h, skip_im2col)))
{
skip_im2col = false;
skip_col2im = false;
@@ -495,68 +512,25 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
}
// Create temporary GEMM output tensor in case we cannot skip col2im
- const DataType gemm_data_type = is_quantized ? DataType::S32 : data_type;
if(!skip_col2im)
{
TensorShape shape_gemm = gemm_input_to_use->tensor_shape();
shape_gemm.set(0, mat_weights_cols);
shape_gemm.set(1, conv_w * conv_h);
- info_gemm = TensorInfo(shape_gemm, 1, gemm_data_type);
+ info_gemm = TensorInfo(shape_gemm, 1, data_type);
}
else
{
- info_gemm = TensorInfo(output->tensor_shape(), 1, gemm_data_type);
+ info_gemm = TensorInfo(output->tensor_shape(), 1, data_type);
}
info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
gemm_output_to_use = &info_gemm;
-
- ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, gemm_output_to_use, skip_col2im ? conv_h : 0, skip_im2col));
-
- if(is_quantized)
- {
- const QuantizationInfo input_quant_info = input->quantization_info();
- const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quant_info : output->quantization_info();
- const float multiplier = input_quant_info.scale * weights_to_use->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
- quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
- if(!skip_col2im)
- {
- tmp_info = TensorInfo(gemm_output_to_use->tensor_shape(), 1, DataType::QASYMM8);
- tmp_info.set_quantization_info(output->quantization_info()).set_data_layout(data_layout);
- gemm_output_staged_to_use = &tmp_info;
- }
-
- // Merge activation with output stage
- int min_activation = 0;
- int max_activation = 0;
-
- const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
- ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
- ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
- };
-
- if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
- {
- const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
- const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
-
- min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
- max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
-
- is_activation_enabled = false;
- }
-
- // Validate output stage for quantized case
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(gemm_output_to_use, biases, gemm_output_staged_to_use, min_activation, max_activation);
- }
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, act_info, skip_col2im ? conv_h : 0, skip_im2col));
// Validate Col2Im/ReshapeLayer
if(!skip_col2im && (data_layout == DataLayout::NCHW))
{
- ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(is_quantized ? gemm_output_staged_to_use : gemm_output_to_use,
- output,
- Size2D(conv_w, conv_h)));
+ ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h)));
}
//Validate Activation Layer
@@ -586,9 +560,6 @@ void NEGEMMConvolutionLayer::run()
{
// Run gemmlowp
_mm_gemmlowp.run();
-
- // Run output stage
- _gemmlowp_output_stage.run();
}
else
{
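
Since the ReLU-family activation is now folded into the output stage instead of running as a separate layer, the bound mapping used in configure_mm()/validate_mm() can be summarised as follows (a sketch assuming the usual QASYMM8 quantization quantize(x) = round(x / scale) + offset; clamping of the quantized value to [0, 255] is omitted):

#include <cmath>
#include <cstdint>

inline int32_t quantize(float x, float scale, int32_t offset)
{
    return static_cast<int32_t>(std::lround(x / scale)) + offset;
}

// RELU                  -> [offset, 255]               lower bound at the quantized zero point
// BOUNDED_RELU(a)       -> [offset, quantize(a)]
// LU_BOUNDED_RELU(a, b) -> [quantize(b), quantize(a)]
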
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 5286f113a5..85e49fd265 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -42,8 +42,8 @@ using namespace arm_compute::misc::shape_calculator;
NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(memory_manager), _asm_glue(memory_manager), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(),
- _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _original_b(nullptr), _a_offset(0), _b_offset(0), _run_vector_matrix_multiplication(false),
- _dot_product_path(false), _reshape_b_only_on_first_run(false), _is_prepared(false)
+ _offset_contribution_kernel(), _offset_contribution_output_stage_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _mm_result_s32(), _original_b(nullptr), _a_offset(0), _b_offset(0),
+ _run_vector_matrix_multiplication(false), _dot_product_path(false), _reshape_b_only_on_first_run(false), _is_prepared(false), _fuse_output_stage(false)
{
}
@@ -53,6 +53,9 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
ARM_COMPUTE_UNUSED(c);
ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));
+ const ITensor *matrix_a = a;
+ const ITensor *matrix_b = b;
+
// Clear state
_mtx_a_reshape_kernel = nullptr;
_mtx_b_reshape_kernel = nullptr;
@@ -65,6 +68,18 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
_is_prepared = false;
_original_b = b;
+ // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
+ if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
+ {
+ _fuse_output_stage = true;
+
+ _memory_group.manage(&_mm_result_s32);
+
+ TensorInfo info_mm_result_s32(output->info()->tensor_shape(), 1, DataType::S32);
+
+ _mm_result_s32.allocator()->init(info_mm_result_s32);
+ }
+
#ifdef __aarch64__
switch(a->info()->data_type())
{
@@ -72,7 +87,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
case DataType::U8:
case DataType::S8:
{
- _asm_glue.configure(a, b, output, 1.f, 0.f, _reshape_b_only_on_first_run);
+ _asm_glue.configure(a, b, _fuse_output_stage ? &_mm_result_s32 : output, 1.f, 0.f, _reshape_b_only_on_first_run);
_dot_product_path = _asm_glue.is_configured();
break;
}
@@ -83,51 +98,35 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
}
}
#endif /* __aarch64__ */
- if(!_dot_product_path)
+ if(!(_dot_product_path || _run_vector_matrix_multiplication))
{
- if(_run_vector_matrix_multiplication)
+ matrix_a = &_tmp_a;
+ matrix_b = &_tmp_b;
+
+ // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
+ TensorInfo a_info(compute_interleaved_shape(*a->info()), 1, a->info()->data_type());
+ // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
+ TensorInfo b_info(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type());
+ _tmp_a.allocator()->init(a_info);
+ _tmp_b.allocator()->init(b_info);
+ _memory_group.manage(&_tmp_a);
+ if(!_reshape_b_only_on_first_run)
{
- // Configure matrix multiply kernel
- {
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
- k->configure(a, b, output);
- _mm_kernel = std::move(k);
- }
+ _memory_group.manage(&_tmp_b);
}
- else
- {
- // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
- TensorInfo info_a = a->info()->clone()->set_tensor_shape(compute_interleaved_shape(*a->info())).set_is_resizable(true);
- // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
- TensorInfo info_b = b->info()->clone()->set_tensor_shape(compute_transpose1xW_shape(*b->info())).set_is_resizable(true);
- _tmp_a.allocator()->init(info_a);
- _tmp_b.allocator()->init(info_b);
- _memory_group.manage(&_tmp_a);
- if(!_reshape_b_only_on_first_run)
- {
- _memory_group.manage(&_tmp_b);
- }
- // Configure interleave kernel
- {
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
- k->configure(a, &_tmp_a);
- _mtx_a_reshape_kernel = std::move(k);
- }
-
- // Configure transpose kernel
- {
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
- k->configure(b, &_tmp_b);
- _mtx_b_reshape_kernel = std::move(k);
- }
+ // Configure interleave kernel
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
+ k->configure(a, &_tmp_a);
+ _mtx_a_reshape_kernel = std::move(k);
+ }
- // Configure matrix multiply kernel
- {
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
- k->configure(&_tmp_a, &_tmp_b, output);
- _mm_kernel = std::move(k);
- }
+ // Configure transpose kernel
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
+ k->configure(b, &_tmp_b);
+ _mtx_b_reshape_kernel = std::move(k);
}
}
@@ -158,8 +157,33 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
_mtx_a_reduction_kernel.configure(a, &_vector_sum_row, a->info()->dimension(0), false);
}
- // Configure offset contribution kernel
- _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a->info()->dimension(0), _a_offset, _b_offset);
+ if(_fuse_output_stage)
+ {
+ // Configure matrix multiply kernel
+ if(!_dot_product_path)
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
+ k->configure(matrix_a, matrix_b, &_mm_result_s32);
+ _mm_kernel = std::move(k);
+ }
+
+ _offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
+ _a_offset, _b_offset, gemm_info.gemmlowp_output_stage());
+
+ _mm_result_s32.allocator()->allocate();
+ }
+ else
+ {
+ // Configure matrix multiply kernel
+ if(!_dot_product_path)
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
+ k->configure(matrix_a, matrix_b, output);
+ _mm_kernel = std::move(k);
+ }
+ // Configure offset contribution kernel
+ _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a->info()->dimension(0), _a_offset, _b_offset);
+ }
// Allocate tensors
if(!_dot_product_path && !_run_vector_matrix_multiplication)
@@ -185,43 +209,53 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::QASYMM8);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr, "Bias addition not supported in NEGEMMLowpMatrixMultiplyCore");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::NONE, "Bias addition not supported in NEGEMMLowpMatrixMultiplyCore for output S32");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1),
"The product AB is defined only if the number of columns in A is equal to the number of rows in B");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
+ const ITensorInfo *matrix_a_info = a;
+ const ITensorInfo *matrix_b_info = b;
+
+ TensorInfo tmp_a_info{};
+ TensorInfo tmp_b_info{};
+ TensorInfo mm_result_s32_info{};
+
int32_t a_offset = a->quantization_info().offset;
int32_t b_offset = b->quantization_info().offset;
const bool reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
+ bool fuse_output_stage = gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE;
+ if(fuse_output_stage)
+ {
+ auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(output->tensor_shape()).set_data_type(DataType::S32));
+ }
+
// Check if we need to run the optimized assembly kernel
- const bool run_optimised = bool(NEGEMMAssemblyDispatch::validate(a, b, output, 1.f, 0.f, reshape_b_only_on_first_run));
+ const bool run_optimised = bool(NEGEMMAssemblyDispatch::validate(a, b, fuse_output_stage ? &mm_result_s32_info : output, 1.f, 0.f, reshape_b_only_on_first_run));
if(run_optimised)
{
- if(output->total_size() != 0)
+ ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0));
+ if(gemm_info.depth_output_gemm3d() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0));
- if(gemm_info.depth_output_gemm3d() != 0)
+ if(gemm_info.reinterpret_input_as_3d())
{
- if(gemm_info.reinterpret_input_as_3d())
- {
- ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
- ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2));
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2));
- }
+ ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+ ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2));
}
else
{
- ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+ ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2));
}
}
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+ }
}
else
{
@@ -231,6 +265,9 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
if(!run_vector_matrix_multiplication)
{
+ matrix_a_info = &tmp_a_info;
+ matrix_b_info = &tmp_b_info;
+
// The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
TensorShape shape_tmp_a = a->tensor_shape();
shape_tmp_a.set(0, a->dimension(0) * 4);
@@ -241,16 +278,12 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
shape_tmp_b.set(0, b->dimension(1) * 16);
shape_tmp_b.set(1, std::ceil(b->dimension(0) / 16.f));
- TensorInfo info_a = a->clone()->set_tensor_shape(shape_tmp_a).set_is_resizable(true);
- TensorInfo info_b = b->clone()->set_tensor_shape(shape_tmp_b).set_is_resizable(true);
+ // Validate interleave kernel
+ auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(shape_tmp_a));
+ auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(shape_tmp_b));
- ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a, &info_a));
- ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &info_b));
- ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(&info_a, &info_b, output));
- }
- else
- {
- ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(a, b, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a, &tmp_a_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info));
}
}
@@ -274,12 +307,32 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row, a->dimension(0), false));
}
- // Validate offset contribution kernel
- ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionKernel::validate(output,
- a_offset == 0 ? nullptr : &info_vector_sum_col,
- b_offset == 0 ? nullptr : &info_vector_sum_row,
- a_offset, b_offset));
+ if(fuse_output_stage)
+ {
+ if(!run_optimised)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info));
+ }
+ // Validate offset contribution kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
+ a_offset == 0 ? nullptr : &info_vector_sum_col,
+ b_offset == 0 ? nullptr : &info_vector_sum_row,
+ c, output, a_offset, b_offset,
+ gemm_info.gemmlowp_output_stage()));
+ }
+ else
+ {
+ if(!run_optimised)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output));
+ }
+ // Validate offset contribution kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionKernel::validate(output,
+ a_offset == 0 ? nullptr : &info_vector_sum_col,
+ b_offset == 0 ? nullptr : &info_vector_sum_row,
+ a_offset, b_offset));
+ }
return Status{};
}
@@ -321,8 +374,16 @@ void NEGEMMLowpMatrixMultiplyCore::run()
NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX);
}
- // Run offset contribution kernel
- NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY);
+ if(_fuse_output_stage)
+ {
+ // Run offset contribution kernel
+ NEScheduler::get().schedule(&_offset_contribution_output_stage_kernel, Window::DimY);
+ }
+ else
+ {
+ // Run offset contribution kernel
+ NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY);
+ }
_memory_group.release();
}
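
Putting the pieces together, a minimal usage sketch of the fused path (shapes, scales and output-stage values below are placeholders chosen for illustration): the function now accepts an S32 bias and a QASYMM8 destination when the GEMMInfo carries a GEMMLowpOutputStageInfo, and the offset contribution plus requantization run in a single kernel on the internal S32 result.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_fused_gemmlowp_example()
{
    // A: 16x32 (M x K), B: 32x8 (K x N), bias: 8, dst: 16x8 -- placeholder shapes
    Tensor a, b, bias, dst;
    a.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
    b.allocator()->init(TensorInfo(TensorShape(8U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 5)));
    bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::S32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f, 3)));

    // Illustrative requantization parameters (in practice derived from the tensor scales)
    GEMMLowpOutputStageInfo stage;
    stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    stage.gemmlowp_offset     = 3;
    stage.gemmlowp_multiplier = 254601600;
    stage.gemmlowp_shift      = 10;
    stage.gemmlowp_min_bound  = 0;
    stage.gemmlowp_max_bound  = 255;

    NEGEMMLowpMatrixMultiplyCore gemmlowp;
    gemmlowp.configure(&a, &b, &bias, &dst, GEMMInfo(false, false, true /* reshape B only on first run */, 0, false, false, stage));

    a.allocator()->allocate();
    b.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill a, b and bias with quantized data ...
    gemmlowp.run();
}
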
diff --git a/tests/datasets/GEMMLowpFusedOffsetOutputDataset.h b/tests/datasets/GEMMLowpFusedOffsetOutputDataset.h
new file mode 100644
index 0000000000..c94019e3d5
--- /dev/null
+++ b/tests/datasets/GEMMLowpFusedOffsetOutputDataset.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMMLOWPOUTPUT_DATASET
+#define ARM_COMPUTE_TEST_GEMMLOWPOUTPUT_DATASET
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Utils.h"
+
+using namespace arm_compute;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class GEMMLowpFusedOffsetOutputDataset
+{
+public:
+ using type = std::tuple<TensorShape, TensorShape, TensorShape, int32_t, int32_t, GEMMLowpOutputStageInfo>;
+
+ struct iterator
+ {
+ iterator(std::vector<TensorShape>::const_iterator a_it,
+ std::vector<TensorShape>::const_iterator b_it,
+ std::vector<TensorShape>::const_iterator c_it,
+ std::vector<int32_t>::const_iterator a_offset_it,
+ std::vector<int32_t>::const_iterator b_offset_it,
+ std::vector<GEMMLowpOutputStageInfo>::const_iterator output_stage_it)
+ : _a_it{ std::move(a_it) },
+ _b_it{ std::move(b_it) },
+ _c_it{ std::move(c_it) },
+ _a_offset_it{ std::move(a_offset_it) },
+ _b_offset_it{ std::move(b_offset_it) },
+ _output_stage_it{ std::move(output_stage_it) }
+ {
+ }
+
+ std::string description() const
+ {
+ std::stringstream description;
+ description << "A=" << *_a_it << ":";
+ description << "B=" << *_b_it << ":";
+ description << "C=" << *_c_it << ":";
+ description << "a_offset=" << *_a_offset_it << ":";
+ description << "b_offset=" << *_b_offset_it << ":";
+ description << "output_type=" << string_from_gemmlowp_output_stage((*_output_stage_it).type) << ":";
+ description << "output_offset=" << (*_output_stage_it).gemmlowp_offset << ":";
+ description << "output_multiplier=" << (*_output_stage_it).gemmlowp_multiplier << ":";
+ description << "output_shift=" << (*_output_stage_it).gemmlowp_shift << ":";
+ description << "output_min=" << (*_output_stage_it).gemmlowp_min_bound << ":";
+ description << "output_max=" << (*_output_stage_it).gemmlowp_max_bound << ":";
+
+ return description.str();
+ }
+
+ GEMMLowpFusedOffsetOutputDataset::type operator*() const
+ {
+ return std::make_tuple(*_a_it, *_b_it, *_c_it, *_a_offset_it, *_b_offset_it, *_output_stage_it);
+ }
+
+ iterator &operator++()
+ {
+ ++_a_it;
+ ++_b_it;
+ ++_c_it;
+ ++_a_offset_it;
+ ++_b_offset_it;
+ ++_output_stage_it;
+
+ return *this;
+ }
+
+ private:
+ std::vector<TensorShape>::const_iterator _a_it;
+ std::vector<TensorShape>::const_iterator _b_it;
+ std::vector<TensorShape>::const_iterator _c_it;
+ std::vector<int32_t>::const_iterator _a_offset_it;
+ std::vector<int32_t>::const_iterator _b_offset_it;
+ std::vector<GEMMLowpOutputStageInfo>::const_iterator _output_stage_it;
+ };
+
+ iterator begin() const
+ {
+ return iterator(_a_shapes.begin(), _b_shapes.begin(), _c_shapes.begin(), _a_offset.begin(), _b_offset.begin(), _output_stage.begin());
+ }
+
+ int size() const
+ {
+ return std::min(_a_shapes.size(), std::min(_b_shapes.size(), std::min(_c_shapes.size(), std::min(_a_offset.size(), std::min(_b_offset.size(), _output_stage.size())))));
+ }
+
+ void add_config(TensorShape a, TensorShape b, TensorShape c, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
+ {
+ _a_shapes.emplace_back(std::move(a));
+ _b_shapes.emplace_back(std::move(b));
+ _c_shapes.emplace_back(std::move(c));
+ _a_offset.emplace_back(std::move(a_offset));
+ _b_offset.emplace_back(std::move(b_offset));
+ _output_stage.emplace_back(std::move(output_stage));
+ }
+
+ GEMMLowpOutputStageInfo OutputStageInfo(GEMMLowpOutputStageType type, int32_t offset, int32_t multiplier, int32_t shift, int32_t min, int32_t max)
+ {
+ GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
+ output_stage.type = type;
+ output_stage.gemmlowp_offset = offset;
+ output_stage.gemmlowp_multiplier = multiplier;
+ output_stage.gemmlowp_shift = shift;
+ output_stage.gemmlowp_min_bound = min;
+ output_stage.gemmlowp_max_bound = max;
+ return output_stage;
+ }
+
+protected:
+ GEMMLowpFusedOffsetOutputDataset() = default;
+ GEMMLowpFusedOffsetOutputDataset(GEMMLowpFusedOffsetOutputDataset &&) = default;
+
+private:
+ std::vector<TensorShape> _a_shapes{};
+ std::vector<TensorShape> _b_shapes{};
+ std::vector<TensorShape> _c_shapes{};
+ std::vector<int32_t> _a_offset{};
+ std::vector<int32_t> _b_offset{};
+ std::vector<GEMMLowpOutputStageInfo> _output_stage{};
+};
+
+class SmallGEMMLowpFusedOffsetOutputDataset final : public GEMMLowpFusedOffsetOutputDataset
+{
+public:
+ SmallGEMMLowpFusedOffsetOutputDataset()
+ {
+ add_config(TensorShape(21U, 1U), TensorShape(43U, 21U), TensorShape(43U, 1U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -200, 2, 13, 10, 210));
+ add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 13, 10, 210));
+ add_config(TensorShape(31U, 3U), TensorShape(72U, 31U), TensorShape(72U, 3U), -2, 13, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 0, 2, 13, 10, 210));
+ add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 100, 2, 13, 10, 210));
+ add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 0, 2, 13, 10, 210));
+ add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 18, 23, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 200, 2, 13, 10, 210));
+ add_config(TensorShape(38U, 43U), TensorShape(21U, 38U), TensorShape(21U, 43U), -3, -2, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -200, 2, 13, 10, 210));
+ add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 13, 10, 210));
+
+ add_config(TensorShape(21U, 1U), TensorShape(43U, 21U), TensorShape(43U, 1U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -2, 254601600, 10, 10, 210));
+ add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -1, 254601600, 10, 10, 210));
+ add_config(TensorShape(31U, 3U), TensorShape(72U, 31U), TensorShape(72U, 3U), -2, 13, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, 0, 254601600, 10, 10, 210));
+ add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, 1, 254601600, 10, 10, 210));
+ add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 5, 13, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, 2, 254601602, 10, 10, 210));
+ add_config(TensorShape(38U, 43U), TensorShape(21U, 38U), TensorShape(21U, 43U), -3, -2, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -2, 254601602, 10, 10, 210));
+ add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -1, 254601602, 10, 10, 210));
+ }
+};
+
+class LargeGEMMLowpFusedOffsetOutputDataset final : public GEMMLowpFusedOffsetOutputDataset
+{
+public:
+ LargeGEMMLowpFusedOffsetOutputDataset()
+ {
+ add_config(TensorShape(923U, 1U), TensorShape(871U, 923U), TensorShape(871U, 1U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -200, 2, 18, 10, 210));
+ add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 18, 10, 210));
+ add_config(TensorShape(873U, 7U), TensorShape(784U, 873U), TensorShape(784U, 7U), -1, 3, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 0, 2, 18, 10, 210));
+ add_config(TensorShape(873U, 513U), TensorShape(784U, 873U), TensorShape(784U, 513U), 0, 4, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 100, 2, 18, 10, 210));
+ add_config(TensorShape(697U, 872U), TensorShape(563U, 697U), TensorShape(563U, 872U), -2, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 0, 2, 18, 10, 210));
+ add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 200, 2, 18, 10, 210));
+ add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -200, 2, 18, 10, 210));
+ add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 18, 10, 210));
+
+ add_config(TensorShape(923U, 1U), TensorShape(871U, 923U), TensorShape(871U, 1U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -2, 254601600, 15, 10, 210));
+ add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -1, 254601600, 15, 10, 210));
+ add_config(TensorShape(873U, 7U), TensorShape(784U, 873U), TensorShape(784U, 7U), -1, 3, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, 0, 254601600, 15, 10, 210));
+ add_config(TensorShape(873U, 513U), TensorShape(784U, 873U), TensorShape(784U, 513U), 0, 4, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, 1, 254601600, 15, 10, 210));
+ add_config(TensorShape(697U, 872U), TensorShape(563U, 697U), TensorShape(563U, 872U), -2, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, 2, 254601602, 15, 10, 210));
+ add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -2, 254601602, 15, 10, 210));
+ add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -1, 254601602, 15, 10, 210));
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMMLOWPOUTPUT_DATASET */
diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 08641dbaa3..efefbd645b 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -28,6 +28,7 @@
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
+#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
@@ -83,6 +84,21 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFixture, framework:
validate(CLAccessor(_target), _reference);
}
+using CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
+TEST_SUITE(FusedOffsetOutput)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpFusedOffsetOutputDataset())
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpFusedOffsetOutputDataset())
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // FusedOffsetOutput
+
TEST_SUITE(Output3D)
using CLGEMMLowpMatrixMultiplyCoreOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpOutput3DDataset())
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 57067f140f..f0460b4a23 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -30,6 +30,7 @@
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
#include "tests/PaddingCalculator.h"
+#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
@@ -144,6 +145,20 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFixture, framework:
validate(Accessor(_target), _reference);
}
+using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
+TEST_SUITE(FusedOffsetOutput)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpFusedOffsetOutputDataset())
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpFusedOffsetOutputDataset())
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FusedOffsetOutput
TEST_SUITE_END() // MatrixMultiplyCore
TEST_SUITE(OutputStage)
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 836f8eddfe..90a4b5cf40 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -42,86 +42,164 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
-class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
+namespace
{
-public:
- template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
+template <typename U>
+void fill(U &&tensor, int i)
+{
+ // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+ std::uniform_int_distribution<> distribution(1, 254);
+ library->fill(tensor, distribution, i);
+}
+
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false>
+TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
+ GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo())
+{
+ // Create tensors
+ TensorType a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1);
+ TensorType output = create_tensor<TensorType>(shape_output, output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : DataType::QASYMM8, 1);
+
+ a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
+ b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
+
+ TensorType bias;
+ if(is_fused)
{
- _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset);
- _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset);
+ TensorShape bias_shape(shape_b[0]);
+ bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
}
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
+ // Create and configure function
+ // The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output
+ FunctionType gemmlowp;
+    // TODO (COMPMID-1672) - Extend the test to validate bias addition in the offset contribution
+ gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, output_stage));
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ output.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(a), 0);
+ fill(AccessorType(b), 1);
+
+ if(is_fused)
{
- // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
- std::uniform_int_distribution<> distribution(1, 254);
- library->fill(tensor, distribution, i);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ bias.allocator()->allocate();
+ ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ fill(AccessorType(bias), 2);
}
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
+ // Compute GEMM function
+ gemmlowp.run();
+ return output;
+}
+
+template <bool reinterpret_input_as_3d>
+SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
+{
+ TensorShape shape_a_to_use = shape_a;
+ if(reinterpret_input_as_3d)
{
- // Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
- TensorType b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1);
- TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
+ // Collapse the second and third dimension if the input is 3D
+ shape_a_to_use.collapse(2U, 1U);
+ }
- a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
- b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
+ // Create reference
+ SimpleTensor<uint8_t> a{ shape_a_to_use, DataType::QASYMM8, 1 };
+ SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };
- // Create and configure function
- // The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output
- FunctionType gemmlowp;
- // TODO (COMPMID-1672) - Extending the test to validate add bias in offset contribution
- gemmlowp.configure(&a, &b, nullptr, &c, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_c[2] : 0), reinterpret_input_as_3d));
+ // Fill reference
+ fill(a, 0);
+ fill(b, 1);
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(a, b, shape_output, a_offset, b_offset);
+}
+}
- // Allocate tensors
- a.allocator()->allocate();
- b.allocator()->allocate();
- c.allocator()->allocate();
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
+class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
+ {
+ _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset);
+ _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset);
+ }
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+protected:
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
+ {
+ return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset);
+ }
- // Fill tensors
- fill(AccessorType(a), 0);
- fill(AccessorType(b), 1);
+ SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
+ {
+ return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
+ }
- // Compute GEMM function
- gemmlowp.run();
- return c;
+ TensorType _target{};
+ SimpleTensor<int32_t> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
+class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
+ {
+ ARM_COMPUTE_EXPECT(output_stage.type != GEMMLowpOutputStageType::NONE, framework::LogLevel::ERRORS);
+ _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage);
+ _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage);
}
- SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
+protected:
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
- TensorShape shape_a_to_use = shape_a;
- if(reinterpret_input_as_3d)
- {
- // Collapse the second and third dimension if the input is 3D
- shape_a_to_use.collapse(2U, 1U);
- }
+ return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset,
+ output_stage);
+ }
- // Create reference
- SimpleTensor<uint8_t> a{ shape_a_to_use, DataType::QASYMM8, 1 };
- SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };
+ SimpleTensor<qasymm8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
+ GEMMLowpOutputStageInfo output_stage)
+ {
+ SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
- // Fill reference
- fill(a, 0);
- fill(b, 1);
+ TensorShape bias_shape(shape_b[0]);
+ SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
+ fill(bias, 2);
- return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(a, b, shape_c, a_offset, b_offset);
+ switch(output_stage.type)
+ {
+ case GEMMLowpOutputStageType::QUANTIZE_DOWN:
+ return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(output, bias,
+ output_stage.gemmlowp_offset, output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
+ break;
+ case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
+ return reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(output, bias,
+ output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not Supported!");
+ }
}
- TensorType _target{};
- SimpleTensor<int32_t> _reference{};
+ TensorType _target{};
+ SimpleTensor<qasymm8_t> _reference{};
};
template <typename TensorType, typename AccessorType, typename FunctionType>
@@ -536,4 +614,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
\ No newline at end of file
+#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
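
As a note on what the fused-output fixture checks: the reference path accumulates in S32 with the a/b offsets applied (following the value-plus-offset convention documented for the GEMMLowp core), adds the S32 bias, and only then applies the selected output stage. A simplified scalar sketch for one output element:

#include <cstdint>
#include <vector>

// Reference accumulation for output element (m, n) of an M x K by K x N product,
// before the QUANTIZE_DOWN / QUANTIZE_DOWN_FIXEDPOINT stage is applied.
// (Illustration only; the actual reference lives under tests/validation/reference.)
int32_t reference_acc(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                      const std::vector<int32_t> &bias, int m, int n, int K, int N,
                      int32_t a_offset, int32_t b_offset)
{
    int32_t acc = 0;
    for(int k = 0; k < K; ++k)
    {
        acc += (static_cast<int32_t>(a[m * K + k]) + a_offset) * (static_cast<int32_t>(b[k * N + n]) + b_offset);
    }
    return acc + bias[n];
}
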