author    Chunosov <N.Chunosov@yandex.ru>  2017-11-22 20:42:13 +0700
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:42:17 +0000
commit    5124be5d1caa70964d452cf9a8cc7c67df31fa9d (patch)
tree      77d74963e9c3f52050cbc264a692133395182e98
parent    9873ea3f1ea238ba7abfb635807614517c52be4b (diff)
download  ComputeLibrary-5124be5d1caa70964d452cf9a8cc7c67df31fa9d.tar.gz
COMPMID-661: Convolution quantized (#32)
Change-Id: Id69df4ce98d1d89bdf9c9aa5c4d909659909b30f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110456
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h | 1
-rw-r--r--  arm_compute/core/Types.h | 52
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvolutionLayer.h | 60
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h | 12
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h | 2
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h | 20
-rw-r--r--  src/core/CL/cl_kernels/convolution_layer.cl | 19
-rw-r--r--  src/core/CL/cl_kernels/gemmlowp.cl | 8
-rw-r--r--  src/core/CL/kernels/CLCol2ImKernel.cpp | 18
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp | 1
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp | 24
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp | 26
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLIm2ColKernel.cpp | 2
-rw-r--r--  src/core/CL/kernels/CLWeightsReshapeKernel.cpp | 22
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp | 24
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp | 24
-rw-r--r--  src/runtime/CL/functions/CLConvolutionLayer.cpp | 153
-rw-r--r--  src/runtime/CL/functions/CLFullyConnectedLayer.cpp | 11
-rw-r--r--  src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp | 39
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp | 10
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp | 55
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp | 2
-rw-r--r--  tests/validation/CL/FullyConnectedLayer.cpp | 7
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h | 121
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h | 2
27 files changed, 521 insertions(+), 200 deletions(-)
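For orientation, the sketch below shows how the quantized convolution path introduced by this change could be exercised from user code; the shapes, scales and offsets are purely illustrative, and buffer filling and validation are omitted. Note that for QASYMM8 inputs the biases are expected to be S32, as documented in CLConvolutionLayer.h below.

    #include "arm_compute/runtime/CL/CLFunctions.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"

    using namespace arm_compute;

    void quantized_convolution_example()
    {
        CLScheduler::get().default_init();

        CLTensor src, weights, biases, dst;

        // QASYMM8 input and weights with illustrative quantization parameters (scale, offset)
        src.allocator()->init(TensorInfo(TensorShape(28U, 28U, 32U), 1, DataType::QASYMM8));
        src.info()->set_quantization_info(QuantizationInfo(0.05f, 120));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 32U, 64U), 1, DataType::QASYMM8));
        weights.info()->set_quantization_info(QuantizationInfo(0.02f, 110));

        // Biases must be S32 when the input is QASYMM8
        biases.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::S32));

        dst.allocator()->init(TensorInfo(TensorShape(28U, 28U, 64U), 1, DataType::QASYMM8));
        dst.info()->set_quantization_info(QuantizationInfo(0.1f, 128));

        CLConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src, weights and biases with quantized data ...

        conv.run();
    }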
diff --git a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
index ec8940ef7e..6c84ded49e 100644
--- a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
+++ b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
@@ -50,6 +50,7 @@ public:
* and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QS8/QS16/QASYMM8/F16/F32
* @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
* dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
+ * @warning Appending biases to the reshaped weights matrix is not supported for quantized asymmetric types.
* @param[out] output The output tensor. Should be a 2D Tensor. Data types supported: Same as @p input
*/
void configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output);
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index c77f1d4157..beaec143ef 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -827,6 +827,58 @@ private:
const unsigned int _num_kernels;
};
+/** GEMM Information class. This class stores the necessary information to compute GEMM functions */
+class GEMMInfo
+{
+public:
+ /** Default constructor */
+ GEMMInfo()
+ : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(false)
+ {
+ }
+ /** Constructor
+ *
+ * @param[in] is_a_reshaped True if the matrix A has been reshaped
+ * @param[in] is_b_reshaped True if the matrix B has been reshaped
+ * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
+ */
+ GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run)
+ : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run)
+ {
+ }
+ /** Flag which specifies if the matrix A has been reshaped
+ *
+ * @return True if the matrix A has been reshaped
+ */
+ bool is_a_reshaped() const
+ {
+ return _is_a_reshaped;
+ };
+ /** Flag which specifies if the matrix B has been reshaped
+ *
+ * @return True if the matrix B has been reshaped
+ */
+ bool is_b_reshaped() const
+ {
+ return _is_b_reshaped;
+ };
+ /** Flag which specifies if the reshape of matrix B should be executed only for the first run
+ *
+ * @note This flag could be set to TRUE when GEMM is used to accelerate a convolution layer
+ *
+ * @return True if the reshape of matrix B happens only for the first run
+ */
+ bool reshape_b_only_on_first_run() const
+ {
+ return _reshape_b_only_on_first_run;
+ };
+
+private:
+ const bool _is_a_reshaped;
+ const bool _is_b_reshaped;
+ const bool _reshape_b_only_on_first_run;
+};
+
/** IO formatting information class*/
struct IOFormatInfo
{
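A minimal sketch of how the new GEMMInfo is meant to be consumed by the GEMMLowp cores below; the tensors a, b and output are assumed to be already initialised as QASYMM8, QASYMM8 and S32 respectively.

    // Reshape matrix B only on the first run: useful when the same weights are reused
    // across runs, e.g. when GEMMLowp backs a convolution layer.
    GEMMInfo gemm_info(false /* is_a_reshaped */, false /* is_b_reshaped */, true /* reshape_b_only_on_first_run */);

    CLGEMMLowpMatrixMultiplyCore mm_core;
    mm_core.configure(&a, &b, &output, gemm_info);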
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index cd1ea70a23..74c124421a 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -36,6 +36,8 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include <memory>
@@ -55,7 +57,8 @@ public:
CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QS8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+ * Data type supported: QS8/QASYMM8/QS16/F16/F32.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
* @param[out] output Destination tensor. Data types supported: Same as @p weights.
* @param[in] transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
@@ -79,7 +82,8 @@ private:
* -# @ref CLGEMMTranspose1xWKernel (executed only once for each configuration)
* -# @ref CLIm2ColKernel
* -# @ref CLGEMMInterleave4x4Kernel
- * -# @ref CLGEMMMatrixMultiplyKernel
+ * -# @ref CLGEMMMatrixMultiplyKernel or @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
+ * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8Scale (if quantized asymmetric)
* -# @ref CLCol2ImKernel
*/
class CLConvolutionLayer : public IFunction
@@ -91,9 +95,10 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QS16/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
- * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported:Same as @p input.
+ * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
@@ -106,20 +111,37 @@ public:
void run() override;
private:
- CLMemoryGroup _memory_group;
- CLConvolutionLayerReshapeWeights _reshape_weights;
- CLIm2ColKernel _input_im2col_kernel;
- CLGEMMInterleave4x4Kernel _input_interleave_kernel;
- CLGEMMMatrixMultiplyKernel _mm_kernel;
- CLCol2ImKernel _output_col2im_kernel;
- CLTensor _input_im2col_reshaped;
- CLTensor _input_interleaved_reshaped;
- CLTensor _weights_reshaped;
- CLTensor _weights_transposed;
- CLTensor _gemm_output;
- bool _has_bias;
- bool _is_fully_connected_convolution;
- bool _are_weights_reshaped;
+ /** Configures the appropriate matrix multiply routine
+ *
+ * @param input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param weights Weights tensor. Data type supported: Same as @p input.
+ * @param output Output tensor. Data types supported: Same as @p input,
+ * except for input of QASYMM8 type where output should be of S32 type.
+ * @param is_interleaved_transposed Flag that signals if matrix is interleaved transposed
+ */
+ void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed = true);
+
+private:
+ CLMemoryGroup _memory_group;
+ CLConvolutionLayerReshapeWeights _reshape_weights;
+ CLIm2ColKernel _input_im2col_kernel;
+ CLGEMMInterleave4x4Kernel _input_interleave_kernel;
+ CLGEMMMatrixMultiplyKernel _mm_kernel;
+ CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
+ CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
+ CLCol2ImKernel _output_col2im_kernel;
+
+ CLTensor _input_im2col_reshaped;
+ CLTensor _input_interleaved_reshaped;
+ CLTensor _weights_reshaped;
+ CLTensor _weights_transposed;
+ CLTensor _gemm_output;
+ CLTensor _tmp_output;
+
+ bool _append_bias;
+ bool _is_fully_connected_convolution;
+ bool _are_weights_reshaped;
+ bool _is_quantized;
};
}
#endif /* __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h b/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h
index 04f55c1ee4..ae05b0fd9c 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h
@@ -40,11 +40,11 @@ class CLGEMMInterleave4x4 : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output
*
- * @param[in] input First input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] input First input tensor. Data types supported: U8/S8/QS8/QASYMM8/QS16/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
};
}
-#endif /* __ARM_COMPUTE_CLGEMMINTERLEAVE4X4_H__ */
\ No newline at end of file
+#endif /* __ARM_COMPUTE_CLGEMMINTERLEAVE4X4_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
index 9944afeac7..e316144548 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
@@ -62,11 +62,13 @@ public:
* -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
* -# Compute the matrix product of the resulting a * b in int32.
*
- * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
+ * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+ * if the reshape of matrix B should be executed only for the first run
*/
- void configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output);
+ void configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, const GEMMInfo &gemm_info = GEMMInfo());
// Inherited methods overridden:
void run() override;
@@ -86,6 +88,8 @@ private:
int32_t _a_offset;
int32_t _b_offset;
bool _is_interleaved_transposed;
+ bool _is_first_run;
+ bool _reshape_b_only_on_first_run;
};
}
#endif /*__ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h b/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h
index 3d02aa931e..ae56548c27 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h
@@ -38,7 +38,7 @@ class CLGEMMTranspose1xW : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output
*
- * @param[in] input First input tensor. Data type supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32/
+ * @param[in] input First input tensor. Data type supported: U8/S8/QS8/QASYMM8/QS16/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index cc513ade10..a4f35117c0 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -66,20 +66,24 @@ public:
* -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
* -# Compute the matrix product of the resulting a * b in int32.
*
- * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
+ * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+ * if the reshape of matrix B should be executed only for the first run
*/
- void configure(const ITensor *a, const ITensor *b, ITensor *output);
+ void configure(const ITensor *a, const ITensor *b, ITensor *output, const GEMMInfo &gemm_info = GEMMInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
*
- * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
+ * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+ * if the reshape of matrix B should be executed only for the first run
*
* @return an error status
*/
- static Error validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output);
+ static Error validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo());
// Inherited methods overridden:
void run() override;
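The NEON core gains the same optional GEMMInfo argument on both configure() and validate(); a sketch of the static validation entry point with hypothetical matrix shapes (M=32, N=16, K=64):

    const TensorInfo a(TensorShape(64U, 32U), 1, DataType::QASYMM8);   // Matrix A: M x K
    const TensorInfo b(TensorShape(16U, 64U), 1, DataType::QASYMM8);   // Matrix B: K x N
    const TensorInfo dst(TensorShape(16U, 32U), 1, DataType::S32);     // Output:   M x N

    const Error err = NEGEMMLowpMatrixMultiplyCore::validate(&a, &b, &dst, GEMMInfo(false, false, true));
    // err can be inspected before configuring the function on real tensors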
diff --git a/src/core/CL/cl_kernels/convolution_layer.cl b/src/core/CL/cl_kernels/convolution_layer.cl
index ce0849bf7a..77b9b64945 100644
--- a/src/core/CL/cl_kernels/convolution_layer.cl
+++ b/src/core/CL/cl_kernels/convolution_layer.cl
@@ -97,13 +97,14 @@ __kernel void reshape_to_columns(
}
}
-#if defined(CONVOLVED_WIDTH) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(KERNEL_DEPTH) && defined(SRC_WIDTH) && defined(SRC_HEIGHT)
+#if defined(CONVOLVED_WIDTH) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(KERNEL_DEPTH) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(PAD_VALUE)
/** This kernel performs a reshaping of the input tensor to a tensor used to perform convolution using GEMM.
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The value to use for the padding must be passed at compile time using -DPAD_VALUE: e.g. -DPAD_VALUE=0
* @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QASYMM8/QS16/F16/F32
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -149,14 +150,10 @@ __kernel void im2col_generic(
{
#if PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0
*output_ptr = *((__global DATA_TYPE *)(input_ptr + x * src_stride_x + y * src_stride_y));
-#else // PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0
+#else // PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0
if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT)
{
-#if defined(OFFSET)
- *output_ptr = OFFSET;
-#else /* OFFSET */
- *output_ptr = 0;
-#endif /* OFFSET */
+ *output_ptr = PAD_VALUE;
}
else
{
@@ -183,7 +180,7 @@ __kernel void im2col_generic(
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QASYMM8/QS16/F16/F32
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -252,7 +249,7 @@ __kernel void im2col_kernel3x3_padx0_pady0(
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QASYMM8/QS16/F16/F32
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -291,7 +288,7 @@ __kernel void col2im(
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
* @note In case biases will be added in late stage, -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/F16/F32
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QASYMM8/QS16/F16/F32
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index a8e8e600fe..a92881320e 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -380,6 +380,7 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
* @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
* @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
* @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
+ * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually, if gemmlowp is used to accelerate the convolution layer, sum_col will not have batches
*
* The final result is:
*
@@ -429,7 +430,12 @@ __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
Image sum_col = CONVERT_TO_IMAGE_STRUCT(sum_col);
// Compute the offset contribution due to A_OFFSET
+#if defined(SUM_COL_HAS_BATCHES)
+ a_offset_s32 = vload16(0, (__global int *)(sum_col.ptr + get_global_id(2) * sum_col_stride_y));
+#else // defined(SUM_COL_HAS_BATCHES)
a_offset_s32 = vload16(0, (__global int *)(sum_col.ptr));
+#endif // defined(SUM_COL_HAS_BATCHES)
+
a_offset_s32 *= (int16)A_OFFSET;
#endif // defined(A_OFFSET)
@@ -615,4 +621,4 @@ __kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATIO
// Store the result
vstore16(res, 0, dst.ptr);
}
-#endif // defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
\ No newline at end of file
+#endif // defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
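For reference, the identity the offset-contribution kernel implements is the standard GEMMLowp expansion, shown here as an illustrative scalar C++ sketch; a_offset and b_offset are the values the runtime passes in (the negated quantization offsets, see CLConvolutionLayer::configure_mm further down), sum_col_x is the per-column sum of matrix B and sum_row_y the per-row sum of matrix A.

    // sum_k (a[k] + a_offset) * (b[k] + b_offset)
    //   = sum_k a[k] * b[k]            (raw mm_result)
    //   + a_offset * sum_k b[k]        (per output column: vector_sum_col)
    //   + b_offset * sum_k a[k]        (per output row:    vector_sum_row)
    //   + k * a_offset * b_offset      (constant K_OFFSET)
    int32_t offset_contribution(int32_t sum_col_x, int32_t sum_row_y, int k, int32_t a_offset, int32_t b_offset)
    {
        return a_offset * sum_col_x + b_offset * sum_row_y + k * a_offset * b_offset;
    }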
diff --git a/src/core/CL/kernels/CLCol2ImKernel.cpp b/src/core/CL/kernels/CLCol2ImKernel.cpp
index f2886c569a..499e1e8fe0 100644
--- a/src/core/CL/kernels/CLCol2ImKernel.cpp
+++ b/src/core/CL/kernels/CLCol2ImKernel.cpp
@@ -43,7 +43,7 @@ CLCol2ImKernel::CLCol2ImKernel()
void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, std::pair<unsigned int, unsigned int> convolved_dims)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
TensorShape output_shape = input->info()->tensor_shape();
@@ -52,7 +52,7 @@ void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, std::p
output_shape.set(2, input->info()->tensor_shape()[0]);
// Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position(), input->info()->quantization_info());
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -62,15 +62,15 @@ void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, std::p
_output = output;
_convolved_dims = convolved_dims;
+ const DataType data_type = input->info()->data_type();
+
// Create kernel
- std::set<std::string> build_opts = { ("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())) };
- build_opts.emplace("-DWIDTH_OUTPUT=" + support::cpp11::to_string(_convolved_dims.first));
- if(is_data_type_fixed_point(input->info()->data_type()))
- {
- build_opts.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
- }
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_opts.add_option("-DWIDTH_OUTPUT=" + support::cpp11::to_string(_convolved_dims.first));
+ build_opts.add_option_if(is_data_type_fixed_point(data_type), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("col2im", build_opts));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("col2im", build_opts.options()));
// Configure the local work size for Bifrost with a value obtained
// via exhaustive autotuning over 30 representative tensor shapes.
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
index d49aed3171..2877a74be8 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
@@ -63,6 +63,7 @@ void CLGEMMLowpOffsetContributionKernel::configure(ICLTensor *mm_result, const I
ARM_COMPUTE_ERROR_ON(vector_sum_col->info()->dimension(0) != mm_result->info()->dimension(0));
build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
+ build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
}
// If b_offset == 0, vector_sum_row can be a nullptr
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index 37a430e8b0..62288cb771 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -41,8 +41,6 @@ namespace
Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(max > 255);
ARM_COMPUTE_RETURN_ERROR_ON(min < 0 || min > max);
@@ -53,6 +51,13 @@ Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, cons
ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
}
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ }
+
return Error{};
}
@@ -64,11 +69,17 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win,
- input_access,
- output_result_access);
+ input_access);
+
+ if(output->total_size() != 0)
+ {
+ AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = window_changed || update_window_and_padding(win, output_result_access);
+
+ output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ }
if(bias != nullptr)
{
@@ -76,8 +87,6 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
window_changed = window_changed || update_window_and_padding(win, bias_access);
}
- output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
Error err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Error{};
return std::make_pair(err, win);
}
@@ -93,6 +102,7 @@ CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::CLGEMMLowpQuantizeDow
Error CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
(bias != nullptr) ? bias->clone().get() : nullptr,
diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
index 343c31c73d..5d4b25c142 100644
--- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
@@ -41,8 +41,6 @@ namespace
Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(max > 255);
ARM_COMPUTE_RETURN_ERROR_ON(min < 0 || min > max);
@@ -53,6 +51,13 @@ Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, cons
ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
}
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ }
+
return Error{};
}
@@ -64,11 +69,17 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win,
- input_access,
- output_result_access);
+ input_access);
+
+ if(output->total_size() != 0)
+ {
+ AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = window_changed || update_window_and_padding(win, output_result_access);
+
+ output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ }
if(bias != nullptr)
{
@@ -76,8 +87,6 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
window_changed = window_changed || update_window_and_padding(win, bias_access);
}
- output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
Error err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Error{};
return std::make_pair(err, win);
}
@@ -92,6 +101,7 @@ CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::CLGEMMLowpQuantizeDownInt32ToUint
}
Error CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
(bias != nullptr) ? bias->clone().get() : nullptr,
@@ -163,4 +173,4 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window, cl
enqueue(queue, *this, slice);
}
while(collapsed.slide_window_slice_3D(slice));
-}
\ No newline at end of file
+}
diff --git a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
index 6f410d3b14..bcf04b0982 100644
--- a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
@@ -126,7 +126,7 @@ void CLGEMMLowpMatrixBReductionKernel::configure(const ICLTensor *mtx_b, ICLTens
// Configure kernel window
Window win = calculate_max_window(*vector_sum_col->info(), Steps(num_elems_processed_per_iteration));
- AccessWindowStatic input_access(_input->info(), 0, 0, ceil_to_multiple(_input->info()->dimension(0), 16), _input->info()->dimension(1));
+ AccessWindowStatic input_access(_input->info(), 0, 0, ceil_to_multiple(_input->info()->dimension(0), num_elems_processed_per_iteration), _input->info()->dimension(1));
AccessWindowHorizontal output_access(_output->info(), 0, num_elems_processed_per_iteration);
update_window_and_padding(win,
diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp
index f7cf9a3cb4..6514d6cf91 100644
--- a/src/core/CL/kernels/CLIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLIm2ColKernel.cpp
@@ -61,7 +61,6 @@ void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const
build_opts.add_option(("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
build_opts.add_option_if(has_bias, "-DHAS_BIAS");
build_opts.add_option_if(is_data_type_fixed_point(data_type), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
- build_opts.add_option_if(is_data_type_quantized_asymmetric(data_type), "-DOFFSET=" + support::cpp11::to_string(input->info()->quantization_info().offset));
int stride_x = 0;
int stride_y = 0;
@@ -95,6 +94,7 @@ void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const
build_opts.add_option("-DPAD_BOTTOM=" + support::cpp11::to_string(conv_info.pad_bottom()));
build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
+ build_opts.add_option_if_else(is_data_type_quantized(data_type), "-DPAD_VALUE=" + support::cpp11::to_string(input->info()->quantization_info().offset), "-DPAD_VALUE=0");
if(kernel_dims.width == 3 && kernel_dims.height == 3 && !conv_info.has_padding())
{
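The new PAD_VALUE build option exists because, for asymmetric quantized data, the real value 0 does not map to the stored value 0; a hedged sketch of the mapping assumed here:

    // QASYMM8 quantization: q = round(r / scale) + offset, so the real value 0 quantizes to the zero-point.
    // The im2col padding must therefore be filled with the offset rather than with 0:
    const uint8_t pad_value = static_cast<uint8_t>(input->info()->quantization_info().offset);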
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
index be633b2418..3a9a32e58f 100644
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
+++ b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
@@ -41,12 +41,12 @@ CLWeightsReshapeKernel::CLWeightsReshapeKernel()
void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
- const DataType dt = input->info()->data_type();
- const int fixed_point_position = input->info()->fixed_point_position();
+ const DataType data_type = input->info()->data_type();
+ // Calculate output shape
TensorShape output_shape{ input->info()->tensor_shape() };
output_shape.collapse(3);
const size_t tmp_dim = output_shape[0];
@@ -54,7 +54,7 @@ void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *
output_shape.set(1, tmp_dim + (biases != nullptr ? 1 : 0));
// Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), output_shape, 1, dt, fixed_point_position, input->info()->quantization_info());
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -62,6 +62,7 @@ void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *
if(biases != nullptr)
{
+ ARM_COMPUTE_ERROR_ON(is_data_type_quantized_asymmetric(data_type));
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, biases);
ARM_COMPUTE_ERROR_ON((input->info()->num_dimensions() == 4) && (biases->info()->num_dimensions() != 1));
@@ -75,16 +76,13 @@ void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *
_input = input;
// Create build options
- std::set<std::string> build_opts;
- build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
- build_opts.emplace(((biases != nullptr) ? "-DHAS_BIAS" : ""));
- if(is_data_type_fixed_point(input->info()->data_type()))
- {
- build_opts.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
- }
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_opts.add_option_if(biases != nullptr, "-DHAS_BIAS");
+ build_opts.add_option_if(is_data_type_fixed_point(data_type), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reshape_to_columns", build_opts));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reshape_to_columns", build_opts.options()));
// Set static arguments
unsigned int idx = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index 102d08c7ba..c6f7ca4fb2 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -44,8 +44,6 @@ namespace
Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(max > 255);
ARM_COMPUTE_RETURN_ERROR_ON(min < 0 || min > max);
@@ -56,6 +54,13 @@ Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, cons
ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
}
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ }
+
return Error{};
}
@@ -67,11 +72,17 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win,
- input_access,
- output_result_access);
+ input_access);
+
+ if(output->total_size() != 0)
+ {
+ AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = window_changed || update_window_and_padding(win, output_result_access);
+
+ output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ }
if(bias != nullptr)
{
@@ -79,8 +90,6 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
window_changed = window_changed || update_window_and_padding(win, bias_access);
}
- output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
Error err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Error{};
return std::make_pair(err, win);
}
@@ -255,6 +264,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(const
Error NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
(bias != nullptr) ? bias->clone().get() : nullptr,
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
index edd6a9fd80..68b81d8a79 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
@@ -43,8 +43,6 @@ namespace
Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(max > 255);
ARM_COMPUTE_RETURN_ERROR_ON(min < 0 || min > max);
@@ -55,6 +53,13 @@ Error validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, cons
ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
}
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ }
+
return Error{};
}
@@ -66,11 +71,17 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win,
- input_access,
- output_result_access);
+ input_access);
+
+ if(output->total_size() != 0)
+ {
+ AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = window_changed || update_window_and_padding(win, output_result_access);
+
+ output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ }
if(bias != nullptr)
{
@@ -78,8 +89,6 @@ std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITens
window_changed = window_changed || update_window_and_padding(win, bias_access);
}
- output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
Error err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Error{};
return std::make_pair(err, win);
}
@@ -262,6 +271,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::configure(const ITensor *inp
Error NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
(bias != nullptr) ? bias->clone().get() : nullptr,
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 8d45416b30..66548d19b2 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include <cmath>
@@ -42,19 +43,22 @@ CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights(std::shared_p
void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(weights, output);
ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
if(biases != nullptr)
{
+ ARM_COMPUTE_ERROR_ON(is_data_type_quantized_asymmetric(weights->info()->data_type()));
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
ARM_COMPUTE_ERROR_ON(biases->info()->dimension(0) != weights->info()->dimension(3));
ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
}
- const bool _has_bias = (biases != nullptr);
+ const bool append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
+ const unsigned bias_element = (append_biases) ? 1 : 0;
+ const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;
_transpose1xW = transpose1xW;
@@ -62,7 +66,7 @@ void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const
{
// Create tensor to store the reshaped weights
const unsigned int mat_weights_cols = weights->info()->dimension(3);
- const unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + (_has_bias ? 1 : 0);
+ const unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
TensorShape shape_wr(mat_weights_cols, mat_weights_rows);
const DataType dt = weights->info()->data_type();
const int fixed_point_position = weights->info()->fixed_point_position();
@@ -70,13 +74,13 @@ void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const
_weights_reshaped.allocator()->init(info_wr);
_memory_group.manage(&_weights_reshaped);
- _weights_reshape_kernel.configure(weights, biases, &_weights_reshaped);
+ _weights_reshape_kernel.configure(weights, biases_to_use, &_weights_reshaped);
_weights_transposed_kernel.configure(&_weights_reshaped, output);
_weights_reshaped.allocator()->allocate();
}
else
{
- _weights_reshape_kernel.configure(weights, biases, output);
+ _weights_reshape_kernel.configure(weights, biases_to_use, output);
}
}
@@ -95,36 +99,73 @@ void CLConvolutionLayerReshapeWeights::run()
}
CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(),
- _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _has_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
+ : _memory_group(memory_manager), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _output_col2im_kernel(),
+ _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _append_bias(false), _is_fully_connected_convolution(false),
+ _are_weights_reshaped(false), _is_quantized(false)
{
}
+void CLConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed)
+{
+ if(_is_quantized)
+ {
+ // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
+ // Extract and negate input and weights offset
+ const QuantizationInfo input_quantization_info = input->info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
+
+ input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
+ weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+
+ _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+
+ // Revert back QuantizationInfo as input and weights could be used in other convolution layers
+ input->info()->set_quantization_info(input_quantization_info);
+ weights->info()->set_quantization_info(weights_quantization_info);
+ }
+ else
+ {
+ _mm_kernel.configure(input, weights, output, 1.f, is_interleaved_transposed);
+ }
+}
+
void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, weights);
ARM_COMPUTE_ERROR_ON(!weights_info.are_reshaped() && weights->info()->dimension(2) != input->info()->dimension(2));
ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
+ ARM_COMPUTE_ERROR_ON(weights_info.are_reshaped() && is_data_type_quantized_asymmetric(input->info()->data_type()));
+
+ _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
if(biases != nullptr)
{
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+ if(_is_quantized)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+ }
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, biases);
ARM_COMPUTE_ERROR_ON(!weights_info.are_reshaped() && biases->info()->dimension(0) != weights->info()->dimension(3));
ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
}
- const DataType dt = input->info()->data_type();
- const int fixed_point_position = input->info()->fixed_point_position();
+ const DataType dt = input->info()->data_type();
// Set the GPU target for matrix multiply
_mm_kernel.set_target(CLScheduler::get().target());
- _has_bias = (biases != nullptr);
+ _append_bias = (biases != nullptr) && (!_is_quantized);
_are_weights_reshaped = weights_info.are_reshaped();
+ const unsigned bias_element = (_append_bias) ? 1 : 0;
+ const ICLTensor *biases_to_use = (_append_bias) ? biases : nullptr;
+
// Get parameters from conv_info
unsigned int stride_x = 0;
unsigned int stride_y = 0;
@@ -141,36 +182,36 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
// Check if its a "fully connected" convolution
_is_fully_connected_convolution = ((conv_w == 1) && (conv_h == 1));
+ const bool run_interleaved = (!_is_fully_connected_convolution && !_is_quantized);
unsigned int mat_weights_cols = weights->info()->dimension(3);
- unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + (_has_bias ? 1 : 0);
+ unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
// Reshape weights if needed
if(_are_weights_reshaped)
{
mat_weights_cols = weights_info.num_kernels();
const unsigned int quarter_reshaped_cols = weights->info()->dimension(0) / 4;
- mat_weights_rows = (_has_bias ? 1 + quarter_reshaped_cols : quarter_reshaped_cols);
+ mat_weights_rows = quarter_reshaped_cols + bias_element;
}
else
{
- if(_is_fully_connected_convolution)
+ if(_is_fully_connected_convolution || _is_quantized)
{
// Create tensor to store the reshaped weights
TensorShape shape_wr(mat_weights_cols, mat_weights_rows);
- TensorInfo info_wr(shape_wr, 1, dt, fixed_point_position);
- _weights_reshaped.allocator()->init(info_wr);
- _reshape_weights.configure(weights, biases, &_weights_reshaped, false /* 1xW transpose */);
+ _weights_reshaped.allocator()->init(weights->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_wr));
+ _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, false /* 1xW transpose */);
}
else
{
// Create tensor to store transposed weights
const float transpose_width = 16.0f / input->info()->element_size();
TensorShape shape_wt(mat_weights_rows * static_cast<unsigned int>(transpose_width), static_cast<unsigned int>(std::ceil(mat_weights_cols / transpose_width)));
- TensorInfo info_wt(shape_wt, 1, dt, fixed_point_position);
- _weights_reshaped.allocator()->init(info_wt);
- _reshape_weights.configure(weights, biases, &_weights_reshaped, true /* 1xW transpose */);
+ _weights_reshaped.allocator()->init(weights->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_wt));
+ _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, true /* 1xW transpose */);
}
+ _weights_reshaped.info()->set_quantization_info(weights->info()->quantization_info());
weights = &_weights_reshaped;
}
@@ -181,16 +222,16 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
shape_im2col.set(0, mat_input_cols);
shape_im2col.set(1, mat_input_rows);
shape_im2col.set(2, 1);
- _input_im2col_reshaped.allocator()->init(TensorInfo(shape_im2col, 1, dt, fixed_point_position));
+ _input_im2col_reshaped.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));
_memory_group.manage(&_input_im2col_reshaped);
// Create tensor (interleave) to prepare input tensor for GEMM
- if(!_is_fully_connected_convolution)
+ if(run_interleaved)
{
TensorShape shape_interleaved = shape_im2col;
shape_interleaved.set(0, shape_interleaved.x() * 4);
shape_interleaved.set(1, std::ceil(shape_interleaved.y() / 4.f));
- _input_interleaved_reshaped.allocator()->init(TensorInfo(shape_interleaved, 1, dt, fixed_point_position));
+ _input_interleaved_reshaped.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_interleaved));
_memory_group.manage(&_input_interleaved_reshaped);
}
@@ -198,30 +239,51 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
TensorShape shape_gemm = _input_im2col_reshaped.info()->tensor_shape();
shape_gemm.set(0, mat_weights_cols);
shape_gemm.set(1, mat_input_rows);
- _gemm_output.allocator()->init(TensorInfo(shape_gemm, 1, dt, fixed_point_position));
+ const DataType gemm_data_type = _is_quantized ? DataType::S32 : dt;
+ // GEMM output should be S32 so that the raw integer accumulator can be obtained without quantized post-processing for quantized asymmetric inputs.
+ TensorInfo info_gemm(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_gemm).set_data_type(gemm_data_type).set_quantization_info(
+ output->info()->quantization_info()));
+ _gemm_output.allocator()->init(info_gemm);
_memory_group.manage(&_gemm_output);
// Configure kernels
-
_input_im2col_kernel.set_target(CLScheduler::get().target());
- _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
+ _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _append_bias);
// Configure matrix multiply
- if(_is_fully_connected_convolution)
+ if(run_interleaved)
{
- // The matrix A and Matrix B have not been reshaped
- _mm_kernel.configure(&_input_im2col_reshaped, weights, &_gemm_output, 1.0f, false);
+ _input_interleave_kernel.configure(&_input_im2col_reshaped, &_input_interleaved_reshaped);
+ configure_mm(&_input_interleaved_reshaped, weights, &_gemm_output);
+ _input_interleaved_reshaped.allocator()->allocate();
}
else
{
- _input_interleave_kernel.configure(&_input_im2col_reshaped, &_input_interleaved_reshaped);
- _mm_kernel.configure(&_input_interleaved_reshaped, weights, &_gemm_output, 1.0f);
- _input_interleaved_reshaped.allocator()->allocate();
+ configure_mm(&_input_im2col_reshaped, weights, &_gemm_output, false);
}
_input_im2col_reshaped.allocator()->allocate();
+
+ // Configure output stage for quantized case
+ if(_is_quantized)
+ {
+ float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
+ int output_multiplier, output_shift;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+ _gemmlowp_output_stage.configure(&_gemm_output, biases, &_tmp_output, output_multiplier, output_shift, output->info()->quantization_info().offset);
+ _gemm_output.allocator()->allocate();
+ }
+
+ // Configure Col2Im
_output_col2im_kernel.set_target(CLScheduler::get().target());
- _output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h));
- _gemm_output.allocator()->allocate();
+ _output_col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
+ if(_is_quantized)
+ {
+ _tmp_output.allocator()->allocate();
+ }
+ else
+ {
+ _gemm_output.allocator()->allocate();
+ }
ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
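The quantized output stage configured above collapses the three scales into one real multiplier, input_scale * weights_scale / output_scale, and then re-expresses it as a 32-bit integer multiplier plus a right shift. A rough sketch of such a decomposition, assuming the usual gemmlowp-style Q31 convention (the library's calculate_quantized_multiplier_less_than_one may differ in detail):

#include <cassert>
#include <cmath>
#include <cstdint>

// Hedged sketch: decompose a real multiplier in (0, 1) into a Q31 fixed-point multiplier
// and a right shift, i.e. multiplier ~= quantized_multiplier * 2^-31 * 2^-right_shift.
void decompose_multiplier(double multiplier, int32_t *quantized_multiplier, int *right_shift)
{
    assert(multiplier > 0.0 && multiplier < 1.0);
    int shift = 0;
    const double significand = std::frexp(multiplier, &shift); // multiplier = significand * 2^shift, significand in [0.5, 1)
    *right_shift = -shift;                                      // shift is <= 0 here, so right_shift >= 0
    int64_t q = static_cast<int64_t>(std::round(significand * (1ll << 31)));
    if(q == (1ll << 31))                                        // significand rounded up to 1.0
    {
        q /= 2;
        --*right_shift;
    }
    *quantized_multiplier = static_cast<int32_t>(q);
}

With these two values the output stage can requantize each S32 accumulator using only integer multiplies and shifts, plus the output offset.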
@@ -243,15 +305,30 @@ void CLConvolutionLayer::run()
_memory_group.acquire();
- // Run input reshaping
+ // Run im2col
CLScheduler::get().enqueue(_input_im2col_kernel);
- if(!_is_fully_connected_convolution)
+
+ if(!_is_fully_connected_convolution && !_is_quantized)
{
+ // Run interleave4x4
CLScheduler::get().enqueue(_input_interleave_kernel);
}
// Runs matrix multiply on reshaped matrices
- CLScheduler::get().enqueue(_mm_kernel);
+ if(_is_quantized)
+ {
+ _mm_gemmlowp.run();
+ }
+ else
+ {
+ CLScheduler::get().enqueue(_mm_kernel);
+ }
+
+ // Run output stage for quantized case
+ if(_is_quantized)
+ {
+ _gemmlowp_output_stage.run();
+ }
// Reshape output matrix
CLScheduler::get().enqueue(_output_col2im_kernel, false);
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 6cc2f4bdb7..7fd81cdb94 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -50,13 +50,20 @@ void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor
{
if(_is_quantized)
{
+ // Since we need negated offsets for the quantized matrix multiplication, temporarily change the QuantizationInfo()
// Extract and negate input and weights offset
- QuantizationInfo input_quantization_info = input->info()->quantization_info();
- QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
+ const QuantizationInfo input_quantization_info = input->info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
+
input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+
// Configure gemmlowp function
_mm_gemmlowp.configure(input, weights, output);
+
+ // Restore the original QuantizationInfo, as input and weights could be used in other fully connected layers
+ input->info()->set_quantization_info(input_quantization_info);
+ weights->info()->set_quantization_info(weights_quantization_info);
}
else
{
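The temporary negation above exists because the GEMMLowp core presumably consumes the offsets with the opposite sign to the dequantization convention real = scale * (q - offset). The underlying arithmetic is the standard expansion of an asymmetric-quantized dot product, sketched below as plain reference C++ (illustrative only, not the library's kernels):

#include <cstdint>
#include <vector>

// Illustrative only: accumulator for one output element of a quantized GEMM,
//   sum_k (a[k] - a_offset) * (b[k] - b_offset)
// = sum_k a[k]*b[k] - a_offset * sum_k b[k] - b_offset * sum_k a[k] + K * a_offset * b_offset
// The last three terms are the "offset contributions" added after the raw uint8 x uint8 GEMM.
int32_t quantized_dot(const std::vector<uint8_t> &a_row, const std::vector<uint8_t> &b_col, int32_t a_offset, int32_t b_offset)
{
    const int32_t K = static_cast<int32_t>(a_row.size());
    int32_t acc = 0, sum_a = 0, sum_b = 0;
    for(int32_t k = 0; k < K; ++k)
    {
        acc += static_cast<int32_t>(a_row[k]) * static_cast<int32_t>(b_col[k]);
        sum_a += a_row[k];
        sum_b += b_col[k];
    }
    return acc - a_offset * sum_b - b_offset * sum_a + K * a_offset * b_offset;
}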
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 5d2d13e243..5c6f5b4ed0 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -35,11 +35,11 @@ using namespace arm_compute;
CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), _mtx_b_reshape_kernel(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _offset_contribution_kernel(),
- _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _a_offset(0), _b_offset(0), _is_interleaved_transposed(true)
+ _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _a_offset(0), _b_offset(0), _is_interleaved_transposed(true), _is_first_run(true), _reshape_b_only_on_first_run(false)
{
}
-void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output)
+void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
@@ -47,9 +47,12 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A");
ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B");
+ ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
+ ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
- _a_offset = a->info()->quantization_info().offset;
- _b_offset = b->info()->quantization_info().offset;
+ _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
+ _a_offset = a->info()->quantization_info().offset;
+ _b_offset = b->info()->quantization_info().offset;
// If the input tensor has less than 16 rows, we run a special version of GEMMLowp without reshaping the input tensors
_is_interleaved_transposed = a->info()->dimension(1) > 16;
@@ -93,7 +96,8 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
if(_a_offset != 0)
{
TensorShape shape_vector_sum_col = b->info()->tensor_shape();
- if(b->info()->num_dimensions() > 1)
+
+ if(shape_vector_sum_col.num_dimensions() > 1)
{
shape_vector_sum_col.remove_dimension(1);
}
@@ -152,8 +156,21 @@ void CLGEMMLowpMatrixMultiplyCore::run()
// Run reshape matrix A
CLScheduler::get().enqueue(_mtx_a_reshape_kernel, false);
- // Run reshape matrix B
- CLScheduler::get().enqueue(_mtx_b_reshape_kernel, false);
+ if(_is_first_run || !_reshape_b_only_on_first_run)
+ {
+ // Run reshape matrix B
+ CLScheduler::get().enqueue(_mtx_b_reshape_kernel, false);
+ }
+ }
+
+ // Note: if _reshape_b_only_on_first_run = true, the matrix B reduction kernel only needs to be run once
+ if(_is_first_run || !_reshape_b_only_on_first_run)
+ {
+ // Run matrix B reduction kernel only if _a_offset is not equal to 0
+ if(_a_offset != 0)
+ {
+ CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
+ }
}
// Run matrix multiply
@@ -165,14 +182,10 @@ void CLGEMMLowpMatrixMultiplyCore::run()
CLScheduler::get().enqueue(_mtx_a_reduction_kernel, false);
}
- // Run matrix B reduction kernel only if _a_offset is not equal to 0
- if(_a_offset != 0)
- {
- CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
- }
-
// Run offset contribution kernel
CLScheduler::get().enqueue(_offset_contribution_kernel, true);
_memory_group.release();
+
+ _is_first_run = false;
}
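The net effect of the new flags is that the B reshape and the matrix B reduction run once and are then reused across calls. A hedged usage sketch, assuming the GEMMInfo(is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run) constructor introduced alongside this change:

#include "arm_compute/runtime/CL/CLFunctions.h"

using namespace arm_compute;

// Illustrative only: configure the quantized GEMM so that the static weights matrix b
// is reshaped (and its column-sum reduction computed) only on the first run.
void configure_and_run(CLGEMMLowpMatrixMultiplyCore &mm, const ICLTensor *a, const ICLTensor *b, ICLTensor *output)
{
    const GEMMInfo gemm_info(false /* is_a_reshaped */, false /* is_b_reshaped */, true /* reshape_b_only_on_first_run */);
    mm.configure(a, b, output, gemm_info);

    mm.run(); // first run: reshapes A and B, runs the B reduction, then the GEMM
    mm.run(); // later runs: reuse the cached reshaped B and its reduction
}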
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 2c6515c1df..a18f48d9a7 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -52,10 +52,11 @@ NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemo
{
}
-void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output)
+void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
- ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info()));
+ ARM_COMPUTE_UNUSED(gemm_info);
+ ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info(), gemm_info));
_a_offset = a->info()->quantization_info().offset;
_b_offset = b->info()->quantization_info().offset;
@@ -198,7 +199,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
}
}
-Error NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output)
+Error NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
@@ -209,6 +210,9 @@ Error NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensor
"The output matrix must have the same number of rows as the matrix A");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((b)->dimension(0) != (output)->dimension(0),
"The output matrix must have the same number of columns as the matrix B");
+ ARM_COMPUTE_UNUSED(gemm_info);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
int32_t a_offset = a->quantization_info().offset;
int32_t b_offset = b->quantization_info().offset;
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index a6e07248aa..56e10f01b3 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -45,7 +45,8 @@ namespace
{
RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
/** CNN data types */
@@ -55,6 +56,7 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F32,
DataType::QS8,
DataType::QS16,
+ DataType::QASYMM8,
});
} // namespace
@@ -67,17 +69,22 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Set fixed point position data type allowed
int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
+ auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+
// Create tensors
- CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position);
- CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ const QuantizationInfo src_quantization_info = src.info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
+
// Create and configure function
CLConvolutionLayer conv;
conv.configure(&src, &weights, &bias, &dst, info);
@@ -93,6 +100,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
validate(bias.info()->valid_region(), bias_valid_region);
validate(dst.info()->valid_region(), dst_valid_region);
+ // Validate QuantizationInfo
+ ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
+
// Validate padding
//TODO(COMPMID-415) Need to validate padding?
}
@@ -143,7 +154,7 @@ TEST_SUITE_END()
template <typename T>
using CLConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
-TEST_SUITE(Quantized)
+TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
@@ -153,7 +164,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int8_t>, fr
framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
@@ -162,7 +173,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int8_t>, fr
framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
TEST_SUITE_END()
@@ -175,7 +186,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int16_t>, f
framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
@@ -184,7 +195,31 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int16_t>, f
framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+template <typename T>
+using CLConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
TEST_SUITE_END()
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index 84e1bca6a5..4b747d64f7 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -232,4 +232,4 @@ TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
} // namespace test
-} // namespace arm_compute
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index e53f5fd407..0d8c8774b8 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -99,6 +99,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ const QuantizationInfo src_quantization_info = src.info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
+
// Create and configure function.
CLFullyConnectedLayer fc;
fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
@@ -106,6 +109,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
// Validate valid region
const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
validate(dst.info()->valid_region(), dst_valid_region);
+
+ // Validate QuantizationInfo
+ ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
}
template <typename T>
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 859780812a..48b4665fe7 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -47,17 +47,24 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionValidationFixedPointFixture : public framework::Fixture
+class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
+ using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+
+public:
template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type, int fractional_bits)
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights,
+ DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
{
- _fractional_bits = fractional_bits;
- _data_type = data_type;
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, fractional_bits);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits);
+ _data_type = data_type;
+ _is_quantized = is_data_type_quantized_asymmetric(data_type);
+ _bias_data_type = _is_quantized ? DataType::S32 : data_type;
+ _fractional_bits = fractional_bits;
+ _quantization_info = quantization_info;
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info);
}
protected:
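The TBias alias above selects the bias element type from T: quantized asymmetric (uint8_t) inputs carry S32 biases, since the bias is added to the 32-bit accumulator before requantization, while all other types keep their own type. A compile-time check of that selection (illustrative only):

#include <cstdint>
#include <type_traits>

// Illustrative only: mirrors the TBias alias above.
template <typename T>
using BiasTypeFor = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;

static_assert(std::is_same<BiasTypeFor<uint8_t>, int32_t>::value, "QASYMM8 inputs use S32 biases");
static_assert(std::is_same<BiasTypeFor<float>, float>::value, "F32 inputs keep F32 biases");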
@@ -66,6 +73,18 @@ protected:
{
switch(tensor.data_type())
{
+ case DataType::QASYMM8:
+ {
+ std::uniform_int_distribution<uint8_t> distribution(0, 3);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F16:
case DataType::F32:
{
@@ -79,7 +98,7 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- bool reshape_weights, DataType data_type, int fixed_point_position)
+ bool reshape_weights)
{
WeightsInfo weights_info(!reshape_weights, weights_shape.x(), weights_shape.y(), weights_shape[3]);
TensorShape reshaped_weights_shape(weights_shape);
@@ -90,15 +109,16 @@ protected:
const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
bool is_optimised = false;
#if defined(__arm__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && _data_type == DataType::F32;
#elif defined(__aarch64__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && _data_type == DataType::F32;
#endif /* defined(__arm__) || defined(__aarch64__) */
reshaped_weights_shape.collapse(3);
- if(bias_shape.total_size() > 0)
+ if(bias_shape.total_size() > 0 && !_is_quantized)
{
+ // Append the bias to the reshaped weights matrix
reshaped_weights_shape.set(0, reshaped_weights_shape.x() + 1);
}
@@ -110,17 +130,17 @@ protected:
}
else
{
- const int interleave_width = 16 / data_size_from_type(data_type);
+ const int interleave_width = 16 / data_size_from_type(_data_type);
reshaped_weights_shape.set(0, reshaped_weights_shape.x() * interleave_width);
reshaped_weights_shape.set(1, static_cast<unsigned int>(std::ceil(reshaped_weights_shape.y() / static_cast<float>(interleave_width))));
}
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position);
- TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, data_type, 1, fixed_point_position);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info);
// Create and configure function
FunctionType conv;
@@ -150,20 +170,28 @@ protected:
const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
bool is_optimised = false;
#if defined(__arm__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && _data_type == DataType::F32;
#elif defined(__aarch64__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && _data_type == DataType::F32;
#endif /* defined(__arm__) || defined(__aarch64__) */
TensorShape tmp_weights_shape(weights_shape);
- SimpleTensor<T> tmp_weights(tmp_weights_shape, data_type, 1, fixed_point_position);
- SimpleTensor<T> tmp_bias(bias_shape, data_type, 1, fixed_point_position);
+ SimpleTensor<T> tmp_weights(tmp_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);
// Fill with original shape
fill(tmp_weights, 1);
- fill(tmp_bias, 2);
- tmp_weights = linearise_weights(tmp_weights, &tmp_bias);
+ if(_is_quantized)
+ {
+ fill(AccessorType(bias), 2);
+ tmp_weights = linearise_weights(tmp_weights);
+ }
+ else
+ {
+ SimpleTensor<T> tmp_bias(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info);
+ fill(tmp_bias, 2);
+ tmp_weights = linearise_weights(tmp_weights, &tmp_bias);
+ }
if(!is_fully_connected_convolution && !is_optimised)
{
@@ -192,13 +220,12 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, int fixed_point_position)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position };
- SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position };
+ SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
+ SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };
// Fill reference
fill(src, 0);
@@ -208,10 +235,13 @@ protected:
return reference::convolution_layer<T>(src, weights, bias, output_shape, info);
}
- TensorType _target{};
- SimpleTensor<T> _reference{};
- int _fractional_bits{};
- DataType _data_type{};
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataType _bias_data_type{};
+ int _fractional_bits{};
+ QuantizationInfo _quantization_info{};
+ bool _is_quantized = false;
private:
template <typename U>
@@ -241,7 +271,6 @@ private:
dst[dst_idx] = weights[weights_idx];
}
-
if(biases != nullptr)
{
// Fill last row with biases
@@ -260,13 +289,37 @@ private:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionValidationFixture : public ConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type)
{
- ConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, 0);
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, 0, QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ConvolutionValidationFixedPointFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type, int fractional_bits)
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, fractional_bits,
+ QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type,
+ QuantizationInfo quantization_info)
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, 0, quantization_info);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index c504e2e10d..06d6be3fa4 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -302,4 +302,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
+#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
\ No newline at end of file