authorGeorgios Pinitas <georgios.pinitas@arm.com>2018-01-12 16:29:45 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:43:42 +0000
commitf72f9367d1eddee91f15a64952b99ee6b80b821d (patch)
tree0d3296219ca7919c263b3701ab22b5468df86354
parenta026e981c607272181292b044c91f73a27d2bcd9 (diff)
downloadComputeLibrary-f72f9367d1eddee91f15a64952b99ee6b80b821d.tar.gz
COMPMID-791: Adds support of QASYMM8 in NEDepthwiseConvolution3x3
Change-Id: I1a9ed6c3420ddf8978aeaad48d9915333b006b49
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/116374
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/NEON/NEAsymm.h  70
-rw-r--r--  arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h  5
-rw-r--r--  arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h  24
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/NEDirectConvolutionDetail.h  252
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h  6
-rw-r--r--  src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp  213
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp  144
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp  59
-rw-r--r--  src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp  48
-rw-r--r--  tests/validation/CL/DepthwiseConvolutionLayer.cpp  8
-rw-r--r--  tests/validation/NEON/DepthwiseConvolutionLayer.cpp  27
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h  8
-rw-r--r--  tests/validation/reference/DepthwiseConvolutionLayer.cpp  8
13 files changed, 653 insertions(+), 219 deletions(-)
diff --git a/arm_compute/core/NEON/NEAsymm.h b/arm_compute/core/NEON/NEAsymm.h
index f0d7439d40..faff59563b 100644
--- a/arm_compute/core/NEON/NEAsymm.h
+++ b/arm_compute/core/NEON/NEAsymm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,6 +56,74 @@ int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent);
* @return A 16-component vector in QASYMM8 format, saturated to fit
*/
uint8x16_t vmlaq_qasymm8(qasymm8x16_t vd, float32x4_t vs, float32x4_t vo);
+
+/** Performs final quantization step on 16 elements
+ *
+ * @tparam is_bounded_relu Specifies whether a fused bounded ReLU should be applied
+ *
+ * @param in_s32 Input to be quantized.
+ * @param result_fixedpoint_multiplier Result multiplier parameter
+ * @param result_shift Result shift parameter
+ * @param result_offset_after_shift_s32 Result offset parameter
+ * @param min_u8 Relu lower bound
+ * @param max_u8 Relu upper bound
+ *
+ * @return Quantized values
+ */
+template <bool is_bounded_relu>
+uint8x16_t finalize_quantization(int32x4x4_t &in_s32,
+ int result_fixedpoint_multiplier,
+ int32_t result_shift,
+ int32x4_t result_offset_after_shift_s32,
+ uint8x16_t min_u8,
+ uint8x16_t max_u8)
+{
+ const static int32x4_t zero_s32 = vdupq_n_s32(0);
+
+ // Fixed point multiplication with vector saturating rounding doubling multiply high with scalar
+ in_s32.val[0] = vqrdmulhq_n_s32(in_s32.val[0], result_fixedpoint_multiplier);
+ in_s32.val[1] = vqrdmulhq_n_s32(in_s32.val[1], result_fixedpoint_multiplier);
+ in_s32.val[2] = vqrdmulhq_n_s32(in_s32.val[2], result_fixedpoint_multiplier);
+ in_s32.val[3] = vqrdmulhq_n_s32(in_s32.val[3], result_fixedpoint_multiplier);
+
+ // Round to the nearest division by a power-of-two using result_shift
+ in_s32.val[0] = rounding_divide_by_pow2(in_s32.val[0], result_shift);
+ in_s32.val[1] = rounding_divide_by_pow2(in_s32.val[1], result_shift);
+ in_s32.val[2] = rounding_divide_by_pow2(in_s32.val[2], result_shift);
+ in_s32.val[3] = rounding_divide_by_pow2(in_s32.val[3], result_shift);
+
+ // Add the offset terms
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], result_offset_after_shift_s32);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], result_offset_after_shift_s32);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], result_offset_after_shift_s32);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], result_offset_after_shift_s32);
+
+ // Saturate negative values
+ in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
+ in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
+ in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
+ in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
+
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
+ }
+ };
+
+ // Convert S16 to U8
+ uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));
+
+ if(is_bounded_relu)
+ {
+ out_u8 = vmaxq_u8(out_u8, min_u8);
+ out_u8 = vminq_u8(out_u8, max_u8);
+ }
+
+ return out_u8;
+}
} // namespace arm_compute
#include "arm_compute/core/NEON/NEAsymm.inl"
#endif // __ARM_COMPUTE_NEASYMM_H__
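
For illustration, a scalar model of the requantization that finalize_quantization performs per lane (a sketch following the gemmlowp conventions; the function name and the simplified rounding are ours, and the INT32_MIN saturation corner of vqrdmulh is omitted):

    #include <algorithm>
    #include <cstdint>

    inline uint8_t finalize_quantization_scalar(int32_t acc,
                                                int32_t multiplier, // Q0.31 fixed-point multiplier
                                                int32_t shift,      // rounding right shift
                                                int32_t offset)     // result offset after shift
    {
        // vqrdmulhq_n_s32 per lane: (acc * multiplier + 2^30) >> 31, with rounding
        const int64_t prod = static_cast<int64_t>(acc) * multiplier;
        int32_t res        = static_cast<int32_t>((prod + (INT64_C(1) << 30)) >> 31);

        // rounding_divide_by_pow2 (tie handling simplified in this sketch)
        if(shift > 0)
        {
            res = (res + (1 << (shift - 1))) >> shift;
        }

        // Add the offset, then clamp to [0, 255] like the vmaxq/vqmovun sequence above
        res += offset;
        return static_cast<uint8_t>(std::min(255, std::max(0, res)));
    }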
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
index b8f01cb635..38e2a5ddfd 100644
--- a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,7 @@ public:
NEDepthwiseConvolutionLayer3x3Kernel &operator=(NEDepthwiseConvolutionLayer3x3Kernel &&) = default;
/** Initialize the function's source, destination, conv and border_size.
*
- * @param[in] input Source tensor. DataType supported: F32.
+ * @param[in] input Source tensor. DataType supported: QASYMM8, F32.
* @param[in] weights Weights tensor. This is a 3D tensor with dimensions [3, 3, IFM]. Data type supported: Same as @p input.
* @param[out] output Destination tensor. Data type supported: Same as @p input.
* @param[in] conv_info Padding and stride information to use for the convolution.
@@ -64,6 +64,7 @@ private:
ITensor *_output;
const ITensor *_weights;
PadStrideInfo _conv_info;
+ unsigned int _num_elems_written_per_iteration;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEDEPTHWISECONVOLUTIONKERNEL3x3_H__ */
\ No newline at end of file
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
index 46d52fc182..c42e5c43b5 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,13 +50,17 @@ public:
~NEDirectConvolutionLayerOutputStageKernel() = default;
/** Set the accumulate buffer and the biases of the kernel.
*
- * @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: QS16/QS32/F16/F32
- * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
- * @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
- * Data type supported: QS8/QS16/F16/F32
+ * @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+ * Data type supported: QS16/QS32/F16/F32
+ * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
+ * @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
+ * Data type supported: QS8/QS16/F16/F32
+ * @param[in] result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied to each element of the input matrix once the result offset has been added
+ * @param[in] result_shift (Optional) Integer value used to round the result to the nearest value after division by a power of two, following the fixed point multiplication
+ * @param[in] result_offset_after_shift (Optional) Offset to be applied to the result before converting it back to QASYMM8
*/
- void configure(ITensor *input, const ITensor *bias = nullptr, ITensor *output = nullptr);
+ void configure(ITensor *input, const ITensor *bias = nullptr, ITensor *output = nullptr,
+ int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
/** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerOutputStageKernel
*
* @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
@@ -72,13 +76,17 @@ public:
void run(const Window &window, const ThreadInfo &info) override;
private:
- using OutputStageKernel = void(ITensor *input, const ITensor *bias, const Window window, ITensor *output);
+ using OutputStageKernel = void(ITensor *input, const ITensor *bias, const Window &window, ITensor *output,
+ int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift);
private:
OutputStageKernel *_func;
ITensor *_input;
const ITensor *_bias;
ITensor *_output;
+ int _result_fixedpoint_multiplier;
+ int _result_shift;
+ int _result_offset_after_shift;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H__ */
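
A hypothetical configuration sketch for the quantized path (tensor allocation elided; the tensor names and the encoded constants are ours, not library code): the S32 accumulator is requantized back to QASYMM8 while the bias is added.

    Tensor accumulator; // S32, written by the convolution kernel
    Tensor bias;        // 1D S32 bias, one value per channel
    Tensor output;      // QASYMM8 destination

    NEDirectConvolutionLayerOutputStageKernel output_stage;
    // result_fixedpoint_multiplier/result_shift encode the real multiplier in fixed point
    output_stage.configure(&accumulator, &bias, &output,
                           /* result_fixedpoint_multiplier */ 1 << 30,
                           /* result_shift */ 0,
                           /* result_offset_after_shift */ 10);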
diff --git a/arm_compute/core/NEON/kernels/convolution/NEDirectConvolutionDetail.h b/arm_compute/core/NEON/kernels/convolution/NEDirectConvolutionDetail.h
index c358558610..908fa13876 100644
--- a/arm_compute/core/NEON/kernels/convolution/NEDirectConvolutionDetail.h
+++ b/arm_compute/core/NEON/kernels/convolution/NEDirectConvolutionDetail.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,12 +36,14 @@ namespace detail
{
/** Loads a 3x3 matrix as a row (float).
*
- * @param[in] ptr Pointer to a float 3x3 matrix.
+ * @param[in] ptr Pointer to a float 3x3 matrix.
+ * @param[in] weights_offset (Optional) Weights quantization offset.
*
* @return The loaded matrix.
*/
-inline float32x4x3_t load_matrix_row(const float *ptr)
+inline float32x4x3_t load_matrix_row(const float *ptr, int weights_offset = 0)
{
+ ARM_COMPUTE_UNUSED(weights_offset);
const float32x4x3_t r =
{
{
@@ -55,12 +57,14 @@ inline float32x4x3_t load_matrix_row(const float *ptr)
/** Loads a 3x3 matrix as a row (qint8_t).
*
- * @param[in] ptr Pointer to a qint8 3x3 matrix.
+ * @param[in] ptr Pointer to a qint8 3x3 matrix.
+ * @param[in] weights_offset (Optional) Weights quantization offset.
*
* @return The loaded matrix.
*/
-inline qint8x8x3_t load_matrix_row(const qint8_t *ptr)
+inline qint8x8x3_t load_matrix_row(const qint8_t *ptr, int weights_offset = 0)
{
+ ARM_COMPUTE_UNUSED(weights_offset);
/* ptr is a pointer to a row in a 3x3 matrix, the function returns 3 vectors holding exactly the same value in all lanes:
r.val[0] contains the first element, r.val[1] the second element and r.val[2] the third element (in all lanes) */
const qint8x8x3_t r =
@@ -74,6 +78,30 @@ inline qint8x8x3_t load_matrix_row(const qint8_t *ptr)
return r;
}
+/** Loads a 3x3 matrix as a row (uint8_t).
+ *
+ * @param[in] ptr Pointer to a uint8_t 3x3 matrix.
+ * @param[in] weights_offset (Optional) Weights quantization offset.
+ *
+ * @return The loaded matrix.
+ */
+inline int32x4x3_t load_matrix_row(const uint8_t *ptr, int weights_offset = 0)
+{
+ const int32x4_t v_weights_offset = vdupq_n_s32(weights_offset);
+
+ /* ptr is a pointer to a row in a 3x3 matrix, the function returns 3 vectors holding exactly the same value in all lanes:
+ r.val[0] contains the first element, r.val[1] the second element and r.val[2] the third element (in all lanes) */
+ int32x4x3_t r =
+ {
+ {
+ vaddq_s32(v_weights_offset, vdupq_n_s32(*ptr)),
+ vaddq_s32(v_weights_offset, vdupq_n_s32(*(ptr + 1))),
+ vaddq_s32(v_weights_offset, vdupq_n_s32(*(ptr + 2)))
+ }
+ };
+ return r;
+}
+
/** Perform a convolve3x3 on float32.
*
* @param[in] in_top Pointer to the first row of the input.
@@ -83,15 +111,21 @@ inline qint8x8x3_t load_matrix_row(const qint8_t *ptr)
* @param[in] m1 Second row of the filter.
* @param[in] m2 Third row of the filter.
* @param[in] fixed_point_position (Optional) Fixed point position.
+ * @param[in] input_offset (Optional) Input quantization offset.
*
*/
template <unsigned int stridex>
-float32x4x2_t convolve_3x3(const float *in_top, const float *in_mid, const float *in_low, const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2, int fixed_point_position);
+float32x4x2_t convolve_3x3(const float *in_top, const float *in_mid, const float *in_low,
+ const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2,
+ int fixed_point_position, int input_offset = 0);
template <>
-inline float32x4x2_t convolve_3x3<1>(const float *in_top, const float *in_mid, const float *in_low, const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2, int fixed_point_position)
+inline float32x4x2_t convolve_3x3<1>(const float *in_top, const float *in_mid, const float *in_low,
+ const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2,
+ int fixed_point_position, int input_offset)
{
ARM_COMPUTE_UNUSED(fixed_point_position);
+ ARM_COMPUTE_UNUSED(input_offset);
const float32x4x3_t vtop =
{
@@ -149,9 +183,13 @@ inline float32x4x2_t convolve_3x3<1>(const float *in_top, const float *in_mid, c
}
template <>
-inline float32x4x2_t convolve_3x3<2>(const float *in_top, const float *in_mid, const float *in_low, const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2, int fixed_point_position)
+inline float32x4x2_t convolve_3x3<2>(const float *in_top, const float *in_mid, const float *in_low,
+ const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2,
+ int fixed_point_position, int input_offset)
{
- float32x4x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position);
+ ARM_COMPUTE_UNUSED(input_offset);
+
+ float32x4x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position, input_offset);
out.val[0] = vsetq_lane_f32(vgetq_lane_f32(out.val[0], 2), out.val[0], 1);
out.val[0] = vsetq_lane_f32(vgetq_lane_f32(out.val[1], 0), out.val[0], 2);
out.val[0] = vsetq_lane_f32(vgetq_lane_f32(out.val[1], 2), out.val[0], 3);
@@ -159,9 +197,13 @@ inline float32x4x2_t convolve_3x3<2>(const float *in_top, const float *in_mid, c
}
template <>
-inline float32x4x2_t convolve_3x3<3>(const float *in_top, const float *in_mid, const float *in_low, const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2, int fixed_point_position)
+inline float32x4x2_t convolve_3x3<3>(const float *in_top, const float *in_mid, const float *in_low,
+ const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2,
+ int fixed_point_position, int input_offset)
{
- float32x4x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position);
+ ARM_COMPUTE_UNUSED(input_offset);
+
+ float32x4x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position, input_offset);
out.val[0] = vsetq_lane_f32(vgetq_lane_f32(out.val[0], 3), out.val[0], 1);
return out;
}
@@ -175,15 +217,21 @@ inline float32x4x2_t convolve_3x3<3>(const float *in_top, const float *in_mid, c
* @param[in] m1 Second row of the filter.
* @param[in] m2 Third row of the filter.
* @param[in] fixed_point_position (Optional) Fixed point position.
+ * @param[in] input_offset (Optional) Input quantization offset.
*
*/
template <unsigned int stridex>
-qint16x8x2_t convolve_3x3(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low, const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2, int fixed_point_position);
+qint16x8x2_t convolve_3x3(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low,
+ const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2,
+ int fixed_point_position, int input_offset = 0);
template <>
-inline qint16x8x2_t convolve_3x3<1>(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low, const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2, int fixed_point_position)
+inline qint16x8x2_t convolve_3x3<1>(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low,
+ const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2,
+ int fixed_point_position, int input_offset)
{
ARM_COMPUTE_UNUSED(fixed_point_position);
+ ARM_COMPUTE_UNUSED(input_offset);
const qint8x8x3_t vtop =
{
@@ -236,9 +284,13 @@ inline qint16x8x2_t convolve_3x3<1>(const qint8_t *in_top, const qint8_t *in_mid
}
template <>
-inline qint16x8x2_t convolve_3x3<2>(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low, const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2, int fixed_point_position)
+inline qint16x8x2_t convolve_3x3<2>(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low,
+ const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2,
+ int fixed_point_position, int input_offset)
{
- qint16x8x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position);
+ ARM_COMPUTE_UNUSED(input_offset);
+
+ qint16x8x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position, input_offset);
out.val[0] = vsetq_lane_s16(vgetq_lane_s16(out.val[0], 2), out.val[0], 1);
out.val[0] = vsetq_lane_s16(vgetq_lane_s16(out.val[0], 4), out.val[0], 2);
out.val[0] = vsetq_lane_s16(vgetq_lane_s16(out.val[0], 6), out.val[0], 3);
@@ -250,15 +302,153 @@ inline qint16x8x2_t convolve_3x3<2>(const qint8_t *in_top, const qint8_t *in_mid
}
template <>
-inline qint16x8x2_t convolve_3x3<3>(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low, const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2, int fixed_point_position)
+inline qint16x8x2_t convolve_3x3<3>(const qint8_t *in_top, const qint8_t *in_mid, const qint8_t *in_low,
+ const qint8x8x3_t &m0, const qint8x8x3_t &m1, const qint8x8x3_t &m2,
+ int fixed_point_position, int input_offset)
{
- qint16x8x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position);
+ ARM_COMPUTE_UNUSED(input_offset);
+
+ qint16x8x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position, input_offset);
out.val[0] = vsetq_lane_s16(vgetq_lane_s16(out.val[0], 3), out.val[0], 1);
out.val[0] = vsetq_lane_s16(vgetq_lane_s16(out.val[0], 6), out.val[0], 2);
out.val[0] = vsetq_lane_s16(vgetq_lane_s16(out.val[1], 1), out.val[0], 3);
return out;
}
+/** Perform a convolve3x3 on uint8_t
+ *
+ * @param[in] in_top Pointer to the first row of the input.
+ * @param[in] in_mid Pointer to the second row of the input.
+ * @param[in] in_low Pointer to the third row of the input.
+ * @param[in] m0 First row of the filter.
+ * @param[in] m1 Second row of the filter.
+ * @param[in] m2 Third row of the filter.
+ * @param[in] fixed_point_position (Optional) Fixed point position.
+ * @param[in] input_offset (Optional) Input quantization offset.
+ *
+ */
+template <unsigned int stridex>
+int32x4x2_t convolve_3x3(const uint8_t *in_top, const uint8_t *in_mid, const uint8_t *in_low,
+ const int32x4x3_t &m0, const int32x4x3_t &m1, const int32x4x3_t &m2,
+ int fixed_point_position, int input_offset);
+
+template <>
+inline int32x4x2_t convolve_3x3<1>(const uint8_t *in_top, const uint8_t *in_mid, const uint8_t *in_low, const int32x4x3_t &m0, const int32x4x3_t &m1, const int32x4x3_t &m2,
+ int fixed_point_position, int input_offset)
+{
+ ARM_COMPUTE_UNUSED(fixed_point_position);
+
+ const int32x4_t v_input_offset = vdupq_n_s32(input_offset);
+
+ const uint8x8x2_t vtop =
+ {
+ {
+ vld1_u8(in_top),
+ vld1_u8(in_top + 8)
+ }
+ };
+ const uint8x8x2_t vmid =
+ {
+ {
+ vld1_u8(in_mid),
+ vld1_u8(in_mid + 8)
+ }
+ };
+ const uint8x8x2_t vlow =
+ {
+ {
+ vld1_u8(in_low),
+ vld1_u8(in_low + 8)
+ }
+ };
+
+ const int32x4x3_t vtop_s32 =
+ {
+ {
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vtop.val[0])))),
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vtop.val[0])))),
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vtop.val[1])))),
+ }
+ };
+ const int32x4x3_t vmid_s32 =
+ {
+ {
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vmid.val[0])))),
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vmid.val[0])))),
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vmid.val[1])))),
+ }
+ };
+ const int32x4x3_t vlow_s32 =
+ {
+ {
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vlow.val[0])))),
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vlow.val[0])))),
+ vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vlow.val[1])))),
+ }
+ };
+
+ int32x4x2_t out
+ {
+ {
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ }
+ };
+
+ // 0
+ out.val[0] = vmlaq_s32(out.val[0], vtop_s32.val[0], m0.val[0]);
+ out.val[0] = vmlaq_s32(out.val[0], vextq_s32(vtop_s32.val[0], vtop_s32.val[1], 1), m0.val[1]);
+ out.val[0] = vmlaq_s32(out.val[0], vextq_s32(vtop_s32.val[0], vtop_s32.val[1], 2), m0.val[2]);
+
+ out.val[0] = vmlaq_s32(out.val[0], vmid_s32.val[0], m1.val[0]);
+ out.val[0] = vmlaq_s32(out.val[0], vextq_s32(vmid_s32.val[0], vmid_s32.val[1], 1), m1.val[1]);
+ out.val[0] = vmlaq_s32(out.val[0], vextq_s32(vmid_s32.val[0], vmid_s32.val[1], 2), m1.val[2]);
+
+ out.val[0] = vmlaq_s32(out.val[0], vlow_s32.val[0], m2.val[0]);
+ out.val[0] = vmlaq_s32(out.val[0], vextq_s32(vlow_s32.val[0], vlow_s32.val[1], 1), m2.val[1]);
+ out.val[0] = vmlaq_s32(out.val[0], vextq_s32(vlow_s32.val[0], vlow_s32.val[1], 2), m2.val[2]);
+
+ // 1
+ out.val[1] = vmlaq_s32(out.val[1], vtop_s32.val[1], m0.val[0]);
+ out.val[1] = vmlaq_s32(out.val[1], vextq_s32(vtop_s32.val[1], vtop_s32.val[2], 1), m0.val[1]);
+ out.val[1] = vmlaq_s32(out.val[1], vextq_s32(vtop_s32.val[1], vtop_s32.val[2], 2), m0.val[2]);
+
+ out.val[1] = vmlaq_s32(out.val[1], vmid_s32.val[1], m1.val[0]);
+ out.val[1] = vmlaq_s32(out.val[1], vextq_s32(vmid_s32.val[1], vmid_s32.val[2], 1), m1.val[1]);
+ out.val[1] = vmlaq_s32(out.val[1], vextq_s32(vmid_s32.val[1], vmid_s32.val[2], 2), m1.val[2]);
+
+ out.val[1] = vmlaq_s32(out.val[1], vlow_s32.val[1], m2.val[0]);
+ out.val[1] = vmlaq_s32(out.val[1], vextq_s32(vlow_s32.val[1], vlow_s32.val[2], 1), m2.val[1]);
+ out.val[1] = vmlaq_s32(out.val[1], vextq_s32(vlow_s32.val[1], vlow_s32.val[2], 2), m2.val[2]);
+
+ return out;
+}
+
+template <>
+inline int32x4x2_t convolve_3x3<2>(const uint8_t *in_top, const uint8_t *in_mid, const uint8_t *in_low,
+ const int32x4x3_t &m0, const int32x4x3_t &m1, const int32x4x3_t &m2,
+ int fixed_point_position, int input_offset)
+{
+ ARM_COMPUTE_UNUSED(fixed_point_position);
+
+ int32x4x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position, input_offset);
+ out.val[0] = vsetq_lane_s32(vgetq_lane_s32(out.val[0], 2), out.val[0], 1);
+ out.val[0] = vsetq_lane_s32(vgetq_lane_s32(out.val[1], 0), out.val[0], 2);
+ out.val[0] = vsetq_lane_s32(vgetq_lane_s32(out.val[1], 2), out.val[0], 3);
+ return out;
+}
+
+template <>
+inline int32x4x2_t convolve_3x3<3>(const uint8_t *in_top, const uint8_t *in_mid, const uint8_t *in_low,
+ const int32x4x3_t &m0, const int32x4x3_t &m1, const int32x4x3_t &m2,
+ int fixed_point_position, int input_offset)
+{
+ ARM_COMPUTE_UNUSED(fixed_point_position);
+ int32x4x2_t out = convolve_3x3<1>(in_top, in_mid, in_low, m0, m1, m2, fixed_point_position, input_offset);
+ out.val[0] = vsetq_lane_s32(vgetq_lane_s32(out.val[0], 3), out.val[0], 1);
+ return out;
+}
+
/** Stores a float32x4x2_t array into a memory location.
*
* @param[in] buffer Pointer to the memory location where the values will be stored.
@@ -315,6 +505,34 @@ inline void store_results<3>(qint16_t *buffer, const qint16x8x2_t &values)
vst1_qs16(buffer, vget_low_s16(values.val[0]));
}
+/** Stores an int32x4x2_t array into a memory location.
+ *
+ * @param[in] buffer Pointer to the memory location where the values will be stored.
+ * @param[in] values Values that will be stored.
+ *
+ */
+template <unsigned int stridex>
+void store_results(int32_t *buffer, const int32x4x2_t &values);
+
+template <>
+inline void store_results<1>(int32_t *buffer, const int32x4x2_t &values)
+{
+ vst1q_s32(buffer, values.val[0]);
+ vst1q_s32(buffer + 4, values.val[1]);
+}
+
+template <>
+inline void store_results<2>(int32_t *buffer, const int32x4x2_t &values)
+{
+ vst1q_s32(buffer, values.val[0]);
+}
+
+template <>
+inline void store_results<3>(int32_t *buffer, const int32x4x2_t &values)
+{
+ vst1_s32(buffer, vget_low_s32(values.val[0]));
+}
+
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Loads a 3x3 matrix as a row (float16_t).
*
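
The quantized variants above mirror the reference arithmetic: each operand is widened to S32 and shifted by its quantization offset before the multiply-accumulate. A scalar model of one output element (a sketch; `in`, `w`, `input_offset` and `weights_offset` are assumed inputs, not library symbols):

    int32_t acc = 0;
    for(int r = 0; r < 3; ++r)
    {
        for(int c = 0; c < 3; ++c)
        {
            // Offsets undo the asymmetric quantization of input and weights
            acc += (static_cast<int32_t>(in[r][c]) + input_offset)
                 * (static_cast<int32_t>(w[r][c]) + weights_offset);
        }
    }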
diff --git a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
index 6208c20227..2100828f0d 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,7 +54,7 @@ public:
NEDepthwiseConvolutionLayer3x3();
/** Initialize the function's source, destination, kernels and border_size.
*
- * @param[in, out] input Source tensor. Data type supported: F32. (Written to only for border filling).
+ * @param[in, out] input Source tensor. Data type supported: QASYMM8, F32. (Written to only for border filling).
* @param[in] weights Weights tensor. These are 3D tensors with shape [3, 3, IFM]. Data type supported: Same as @p input.
* @param[in] biases (Optional) Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
* Data type supported: Same as @p input.
@@ -70,7 +70,9 @@ private:
NEDepthwiseConvolutionLayer3x3Kernel _kernel;
NEDirectConvolutionLayerOutputStageKernel _output_stage_kernel;
NEFillBorderKernel _border_handler;
+ Tensor _accumulator;
bool _has_bias;
+ bool _is_quantized;
};
/** Basic function to execute a generic depthwise convolution. This function calls the following NEON kernels:
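
A hypothetical end-to-end usage sketch (tensor allocation elided; shapes and pad/stride values are ours): for QASYMM8 inputs the function now routes the kernel result through the internal S32 accumulator and the output stage kernel.

    NEDepthwiseConvolutionLayer3x3 depthwise;
    // input/weights: QASYMM8, biases: S32, output: QASYMM8
    depthwise.configure(&input, &weights, &biases, &output, PadStrideInfo(1, 1, 1, 1));
    depthwise.run();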
diff --git a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
index 40a8601aaa..bc2f1ed266 100644
--- a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -42,72 +42,18 @@ using namespace arm_compute;
using namespace arm_compute::detail;
using namespace arm_compute::misc::shape_calculator;
-NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
- : _border_size(0), _input(), _output(), _weights(), _conv_info()
-{
-}
-
-BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const
+namespace
{
- return _border_size;
-}
-
-void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
-{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != 3 || weights->info()->dimension(1) != 3);
-
- // Get convolved dimensions
- const TensorShape output_shape = compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(),
- output_shape,
- 1,
- input->info()->data_type(),
- input->info()->fixed_point_position(),
- input->info()->quantization_info());
-
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
-
- _input = input;
- _output = output;
- _weights = weights;
- _conv_info = conv_info;
- const unsigned int conv_stride_x = conv_info.stride().first;
- const unsigned int conv_stride_y = conv_info.stride().second;
- const unsigned int conv_pad_left = conv_info.pad_left();
- const unsigned int conv_pad_top = conv_info.pad_top();
-
- ARM_COMPUTE_ERROR_ON(conv_stride_x < 1 || conv_stride_x > 3);
-
- const unsigned int num_elems_written_per_iteration = 16 >> conv_stride_x;
- _border_size = BorderSize(conv_pad_top, conv_info.pad_right(), conv_info.pad_bottom(), conv_pad_left);
-
- // Configure kernel window
- Window win = calculate_max_window(*output->info(), Steps(num_elems_written_per_iteration));
-
- const unsigned int num_x_steps = (output_shape.x() + num_elems_written_per_iteration - 1) / num_elems_written_per_iteration;
- const int input_num_elems_processed = get_input_num_elems_processed(num_elems_written_per_iteration, conv_stride_x);
-
- AccessWindowStatic input_access(input->info(), -conv_pad_left, -conv_pad_top, (num_x_steps - 1) * input_num_elems_processed + 12, conv_stride_y * (output_shape.y() - 1) + 2);
- AccessWindowStatic weights_access(weights->info(), 0, 0, weights->info()->dimension(0), weights->info()->dimension(1));
- AccessWindowStatic output_access(output->info(), 0, 0, num_x_steps * num_elems_written_per_iteration, output_shape.y());
-
- update_window_and_padding(win, input_access, weights_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
-
- INEKernel::configure(win);
-}
-
-template <unsigned int stridex>
+template <typename T1, typename T2, unsigned int stridex>
class convolver_3x3
{
public:
static void convolve(const Window &window, unsigned int num_elems_written_per_iteration,
const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
{
+ const int input_offset = -input->info()->quantization_info().offset;
+ const int weights_offset = -weights->info()->quantization_info().offset;
+
const int input_stride_x = input->info()->strides_in_bytes().x();
const int input_stride_y = input->info()->strides_in_bytes().y();
const int output_stride_y = output->info()->strides_in_bytes().y();
@@ -117,8 +63,8 @@ public:
const int output_h = output->info()->dimension(1);
const int delta_input = get_input_num_elems_processed<stridex>(num_elems_written_per_iteration);
const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
- const unsigned int conv_pad_x = std::get<0>(conv_info.pad());
- const unsigned int conv_pad_y = std::get<1>(conv_info.pad());
+ const unsigned int conv_pad_x = conv_info.pad_left();
+ const unsigned int conv_pad_y = conv_info.pad_top();
// setup output window for the iterator
Window window_out = window;
@@ -141,29 +87,31 @@ public:
execute_window_loop(window_out, [&](const Coordinates & id)
{
- const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y;
- int ih = 0;
- int oh = 0;
-
- const uint8_t *ptr_weights_base = weights_ptr + id.z() * kernel_stride_z;
- const auto ptr_weights_r0 = reinterpret_cast<const float *>(ptr_weights_base);
- const auto ptr_weights_r1 = reinterpret_cast<const float *>(ptr_weights_base + kernel_stride_y);
- const auto ptr_weights_r2 = reinterpret_cast<const float *>(ptr_weights_base + kernel_stride_y * 2);
- const float32x4x3_t vw_r0 = load_matrix_row(ptr_weights_r0);
- const float32x4x3_t vw_r1 = load_matrix_row(ptr_weights_r1);
- const float32x4x3_t vw_r2 = load_matrix_row(ptr_weights_r2);
+ int ih = 0;
+ int oh = 0;
+
+ const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y;
+ const uint8_t *ptr_weights_base = weights_ptr + id.z() * kernel_stride_z;
+
+ const auto ptr_weights_r0 = reinterpret_cast<const T1 *>(ptr_weights_base);
+ const auto ptr_weights_r1 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y);
+ const auto ptr_weights_r2 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y * 2);
+ const auto vw_r0 = load_matrix_row(ptr_weights_r0, weights_offset);
+ const auto vw_r1 = load_matrix_row(ptr_weights_r1, weights_offset);
+ const auto vw_r2 = load_matrix_row(ptr_weights_r2, weights_offset);
for(ih = 0, oh = 0; oh < output_h; ++oh, ih += conv_stride_y)
{
- auto in_top = reinterpret_cast<const float *>(input_ptr + (ih + 0) * input_stride_y);
- auto in_mid = reinterpret_cast<const float *>(input_ptr + (ih + 1) * input_stride_y);
- auto in_low = reinterpret_cast<const float *>(input_ptr + (ih + 2) * input_stride_y);
- auto p_out = reinterpret_cast<float *>(out.ptr() + oh * output_stride_y);
+ auto in_top = reinterpret_cast<const T1 *>(input_ptr + (ih + 0) * input_stride_y);
+ auto in_mid = reinterpret_cast<const T1 *>(input_ptr + (ih + 1) * input_stride_y);
+ auto in_low = reinterpret_cast<const T1 *>(input_ptr + (ih + 2) * input_stride_y);
+ auto p_out = reinterpret_cast<T2 *>(out.ptr() + oh * output_stride_y);
for(int ow = 0; ow < output_w; ow += num_elems_written_per_iteration,
- in_top += delta_input, in_mid += delta_input, in_low += delta_input, p_out += num_elems_written_per_iteration)
+ in_top += delta_input, in_mid += delta_input, in_low += delta_input,
+ p_out += num_elems_written_per_iteration)
{
- auto vres = convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, 0);
+ auto vres = convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, 0, input_offset);
store_results<stridex>(p_out, vres);
}
}
@@ -172,24 +120,113 @@ public:
}
};
-void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const ThreadInfo &info)
+template <typename T1, typename T2>
+inline void convolve_3x3(const Window &window, unsigned int num_elems_written_per_iteration,
+ const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_UNUSED(info);
-
- const unsigned int conv_stride_x = _conv_info.stride().first;
- const unsigned int num_elems_written_per_iteration = 16 >> conv_stride_x;
-
+ const unsigned int conv_stride_x = std::get<0>(conv_info.stride());
switch(conv_stride_x)
{
case 1:
- convolver_3x3<1>::convolve(window, num_elems_written_per_iteration, _input, _weights, _output, _conv_info);
+ convolver_3x3<T1, T2, 1>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info);
break;
case 2:
- convolver_3x3<2>::convolve(window, num_elems_written_per_iteration, _input, _weights, _output, _conv_info);
+ convolver_3x3<T1, T2, 2>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info);
break;
case 3:
- convolver_3x3<3>::convolve(window, num_elems_written_per_iteration, _input, _weights, _output, _conv_info);
+ convolver_3x3<T1, T2, 3>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+}
+} // namespace
+
+NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
+ : _border_size(0), _input(), _output(), _weights(), _conv_info(), _num_elems_written_per_iteration(0)
+{
+}
+
+BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const
+{
+ return _border_size;
+}
+
+void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != 3 || weights->info()->dimension(1) != 3);
+
+ // Get convolved dimensions
+ const TensorShape output_shape = compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info);
+ const DataType output_dt = (input->info()->data_type() == DataType::QASYMM8) ? DataType::S32 : input->info()->data_type();
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(),
+ input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt));
+
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+
+ _input = input;
+ _output = output;
+ _weights = weights;
+ _conv_info = conv_info;
+ const unsigned int conv_stride_x = conv_info.stride().first;
+ const unsigned int conv_stride_y = conv_info.stride().second;
+ const unsigned int conv_pad_left = conv_info.pad_left();
+ const unsigned int conv_pad_top = conv_info.pad_top();
+
+ ARM_COMPUTE_ERROR_ON(conv_stride_x < 1 || conv_stride_x > 3);
+
+ unsigned int num_elems_read_per_iteration = 0;
+ switch(input->info()->data_type())
+ {
+ case DataType::QASYMM8:
+ num_elems_read_per_iteration = 16;
+ _num_elems_written_per_iteration = 16 >> conv_stride_x;
+ break;
+ case DataType::F32:
+ num_elems_read_per_iteration = 12;
+ _num_elems_written_per_iteration = 16 >> conv_stride_x;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data type not supported.");
+ }
+ _border_size = BorderSize(conv_pad_top, conv_info.pad_right(), conv_info.pad_bottom(), conv_pad_left);
+
+ // Configure kernel window
+ Window win = calculate_max_window(*output->info(), Steps(_num_elems_written_per_iteration));
+
+ const unsigned int num_x_steps = (output_shape.x() + _num_elems_written_per_iteration - 1) / _num_elems_written_per_iteration;
+ const int input_num_elems_processed = get_input_num_elems_processed(_num_elems_written_per_iteration, conv_stride_x);
+
+ AccessWindowStatic input_access(input->info(),
+ -conv_pad_left,
+ -conv_pad_top,
+ (num_x_steps - 1) * input_num_elems_processed + num_elems_read_per_iteration,
+ conv_stride_y * (output_shape.y() - 1) + 2);
+ AccessWindowStatic weights_access(weights->info(), 0, 0, weights->info()->dimension(0), weights->info()->dimension(1));
+ AccessWindowStatic output_access(output->info(), 0, 0, num_x_steps * _num_elems_written_per_iteration, output_shape.y());
+
+ update_window_and_padding(win, input_access, weights_access, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+ INEKernel::configure(win);
+}
+
+void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_UNUSED(info);
+
+ switch(_input->info()->data_type())
+ {
+ case DataType::F32:
+ convolve_3x3<float, float>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info);
+ break;
+ case DataType::QASYMM8:
+ convolve_3x3<uint8_t, int32_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info);
break;
default:
ARM_COMPUTE_ERROR("Not implemented");
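
As a quick check of the step computation above (`_num_elems_written_per_iteration = 16 >> conv_stride_x`, while the QASYMM8 path always reads 16 input bytes), the supported strides map to the following write widths:

    static_assert((16 >> 1) == 8 && (16 >> 2) == 4 && (16 >> 3) == 2,
                  "strides 1, 2 and 3 write 8, 4 and 2 output elements per iteration");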
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
index 40abdb1672..52880a378f 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,6 +27,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NEAsymm.h"
#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
@@ -43,24 +44,26 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8,
+ DataType::QS16, DataType::F16,
+ DataType::QS32, DataType::S32, DataType::F32);
if(bias != nullptr)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::S32, DataType::F32);
- if(is_data_type_quantized(input->data_type()))
+ if(is_data_type_fixed_point(input->data_type()))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS8 && bias->data_type() != DataType::QS8, "Wrong data type for bias");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS16 && bias->data_type() != DataType::QS8, "Wrong data type for bias");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS32 && bias->data_type() != DataType::QS16, "Wrong data type for bias");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, bias);
}
else
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
}
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, bias);
ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
}
else
@@ -71,18 +74,22 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::QS16, DataType::F32);
- if(is_data_type_quantized(input->data_type()))
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F32);
+ if(is_data_type_fixed_point(input->data_type()))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS8 && output->data_type() != DataType::QS8, "Wrong data type for output");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS16 && output->data_type() != DataType::QS8, "Wrong data type for output");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS32 && output->data_type() != DataType::QS16, "Wrong data type for output");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output);
+ }
+ else if(is_data_type_quantized_asymmetric(output->data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S32 && output->data_type() != DataType::QASYMM8, "Wrong data type for output");
}
else
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output);
}
return Status{};
@@ -90,8 +97,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
{
- bool window_changed = false;
- const unsigned int num_elems_processed_per_iteration = 16 / element_size_from_data_type(input->data_type());
+ bool window_changed = false;
+ unsigned int num_elems_processed_per_iteration = 16 / element_size_from_data_type(input->data_type());
+
+ // Update the number of processed elements when the input is S32 (i.e. it holds quantized accumulators)
+ if(input->data_type() == DataType::S32)
+ {
+ num_elems_processed_per_iteration = 16;
+ }
// Configure kernel window
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -145,7 +158,6 @@ inline qint16x8_t internal_vld1q(const qint16_t *in)
{
return vld1q_qs16(in);
}
-
inline qint32x4_t internal_vld1q(const qint32_t *in)
{
return vld1q_s32(in);
@@ -168,7 +180,6 @@ inline void internal_vst1q(qint16_t *p, const qint16x8_t &v)
{
vst1q_qs16(p, v);
}
-
inline void internal_vst1q(qint32_t *p, const qint32x4_t &v)
{
vst1q_s32(p, v);
@@ -192,7 +203,6 @@ inline qint16x8_t internal_vdupq_n(qint16_t v)
{
return vdupq_n_qs16(v);
}
-
inline qint32x4_t internal_vdupq_n(qint32_t v)
{
return vdupq_n_qs32(v);
@@ -236,8 +246,13 @@ inline float16x8_t internal_vqaddq(const float16x8_t &x, const float16x8_t &y)
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
template <typename T1, typename T2, bool in_place, bool has_bias>
-void output_stage(ITensor *input, const ITensor *bias, const Window window, ITensor *output)
+void output_stage(ITensor *input, const ITensor *bias, const Window &window, ITensor *output,
+ int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
{
+ ARM_COMPUTE_UNUSED(result_fixedpoint_multiplier);
+ ARM_COMPUTE_UNUSED(result_shift);
+ ARM_COMPUTE_UNUSED(result_offset_after_shift);
+
Iterator in(input, window);
if(in_place) // In place accumulate
@@ -283,31 +298,112 @@ void output_stage(ITensor *input, const ITensor *bias, const Window window, ITen
in, out);
}
}
+
+// QASYMM8 specializations
+template <>
+void output_stage<int32_t, uint8_t, false, true>(ITensor *input, const ITensor *bias, const Window &window, ITensor *output,
+ int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
+{
+ const int32x4_t result_offset_after_shift_s32 = vdupq_n_s32(result_offset_after_shift);
+ uint8x16_t min = vdupq_n_u8(0);
+ uint8x16_t max = vdupq_n_u8(255);
+
+ Iterator in(input, window);
+ Iterator out(output, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ // Get bias and pointer to input
+ const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr());
+ int32x4x4_t v_in =
+ {
+ {
+ vld1q_s32(in_ptr),
+ vld1q_s32(in_ptr + 4),
+ vld1q_s32(in_ptr + 8),
+ vld1q_s32(in_ptr + 12)
+ }
+ };
+
+ // Accumulate bias
+ const auto vb = vdupq_n_s32(*reinterpret_cast<const int32_t *>(bias->ptr_to_element(Coordinates(id.z()))));
+ v_in =
+ {
+ {
+ vaddq_s32(v_in.val[0], vb),
+ vaddq_s32(v_in.val[1], vb),
+ vaddq_s32(v_in.val[2], vb),
+ vaddq_s32(v_in.val[3], vb)
+ }
+ };
+
+ const auto out_ptr = reinterpret_cast<uint8_t *>(out.ptr());
+ vst1q_u8(out_ptr, finalize_quantization<false>(v_in, result_fixedpoint_multiplier, result_shift, result_offset_after_shift_s32, min, max));
+ },
+ in, out);
+}
+template <>
+void output_stage<int32_t, uint8_t, false, false>(ITensor *input, const ITensor *bias, const Window &window, ITensor *output,
+ int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
+{
+ ARM_COMPUTE_UNUSED(bias);
+
+ const int32x4_t result_offset_after_shift_s32 = vdupq_n_s32(result_offset_after_shift);
+ uint8x16_t min = vdupq_n_u8(0);
+ uint8x16_t max = vdupq_n_u8(255);
+
+ Iterator in(input, window);
+ Iterator out(output, window);
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ // Get pointer to input (no bias in this specialization)
+ const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr());
+ int32x4x4_t v_in =
+ {
+ {
+ vld1q_s32(in_ptr),
+ vld1q_s32(in_ptr + 4),
+ vld1q_s32(in_ptr + 8),
+ vld1q_s32(in_ptr + 12)
+ }
+ };
+
+ const auto out_ptr = reinterpret_cast<uint8_t *>(out.ptr());
+ vst1q_u8(out_ptr, finalize_quantization<false>(v_in, result_fixedpoint_multiplier, result_shift, result_offset_after_shift_s32, min, max));
+ },
+ in, out);
+}
} // namespace
NEDirectConvolutionLayerOutputStageKernel::NEDirectConvolutionLayerOutputStageKernel()
- : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr)
+ : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr), _result_fixedpoint_multiplier(0), _result_shift(0), _result_offset_after_shift(0)
{
}
-void NEDirectConvolutionLayerOutputStageKernel::configure(ITensor *input, const ITensor *bias, ITensor *output)
+void NEDirectConvolutionLayerOutputStageKernel::configure(ITensor *input, const ITensor *bias, ITensor *output,
+ int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
// Auto-initialize the output if required
if(output != nullptr)
{
+ // Work out expected output data type
+ const DataType output_dt = (input->info()->data_type() == DataType::S32) ? DataType::QASYMM8 : input->info()->data_type();
// Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), *input->info());
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_dt));
}
// Perform validation step
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias == nullptr) ? nullptr : bias->info(), (output == nullptr) ? nullptr : output->info()));
- _func = nullptr;
- _bias = bias;
- _input = input;
- _output = output;
+ _func = nullptr;
+ _bias = bias;
+ _input = input;
+ _output = output;
+ _result_fixedpoint_multiplier = result_fixedpoint_multiplier;
+ _result_shift = result_shift;
+ _result_offset_after_shift = result_offset_after_shift;
// Configure kernel window
auto win_config = validate_and_configure_window(input->info(), (bias == nullptr) ? nullptr : bias->info(), (output == nullptr) ? nullptr : output->info());
@@ -350,6 +446,9 @@ void NEDirectConvolutionLayerOutputStageKernel::configure(ITensor *input, const
_func = (output == nullptr) ? &output_stage<qint32_t, qint16_t, true, true> : &output_stage<qint32_t, qint16_t, false, true>;
break;
}
+ case DataType::S32:
+ _func = (bias == nullptr) ? &output_stage<int32_t, uint8_t, false, false> : &output_stage<int32_t, uint8_t, false, true>;
+ break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
{
@@ -365,7 +464,6 @@ void NEDirectConvolutionLayerOutputStageKernel::configure(ITensor *input, const
default:
{
ARM_COMPUTE_ERROR("Unsupported combination of types among the inputs.");
- break;
}
}
}
@@ -385,5 +483,5 @@ void NEDirectConvolutionLayerOutputStageKernel::run(const Window &window, const
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
ARM_COMPUTE_ERROR_ON(_func == nullptr);
- (*_func)(_input, _bias, window, _output);
+ (*_func)(_input, _bias, window, _output, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift);
}
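
In the QASYMM8 specialization above, a single S32 bias value is fetched per output channel (via `bias->ptr_to_element(Coordinates(id.z()))`) and broadcast across all 16 lanes by vdupq_n_s32. A scalar equivalent of that step (a sketch; `bias_data`, `z` and `acc` are names of ours):

    // For channel z: every one of the 16 accumulators gets the same bias value
    const int32_t b = bias_data[z];
    for(int i = 0; i < 16; ++i)
    {
        acc[i] += b;
    }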
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index 8b3f2383ab..5e14e1a95d 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -96,57 +96,11 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
+} // namespace
-template <bool is_bounded_relu>
-inline uint8x16_t finalize_quantization(int32x4x4_t &in_s32, int result_fixedpoint_multiplier, int32_t result_shift, int32x4_t result_offset_after_shift_s32, uint8x16_t min_u8,
- uint8x16_t max_u8)
+namespace arm_compute
{
- const static int32x4_t zero_s32 = vdupq_n_s32(0);
-
- // Fixed point multiplication with vector saturating rounding doubling multiply high with scalar
- in_s32.val[0] = vqrdmulhq_n_s32(in_s32.val[0], result_fixedpoint_multiplier);
- in_s32.val[1] = vqrdmulhq_n_s32(in_s32.val[1], result_fixedpoint_multiplier);
- in_s32.val[2] = vqrdmulhq_n_s32(in_s32.val[2], result_fixedpoint_multiplier);
- in_s32.val[3] = vqrdmulhq_n_s32(in_s32.val[3], result_fixedpoint_multiplier);
-
- // Round to the nearest division by a power-of-two using result_shift_s32
- in_s32.val[0] = rounding_divide_by_pow2(in_s32.val[0], result_shift);
- in_s32.val[1] = rounding_divide_by_pow2(in_s32.val[1], result_shift);
- in_s32.val[2] = rounding_divide_by_pow2(in_s32.val[2], result_shift);
- in_s32.val[3] = rounding_divide_by_pow2(in_s32.val[3], result_shift);
-
- // Add the offset terms
- in_s32.val[0] = vaddq_s32(in_s32.val[0], result_offset_after_shift_s32);
- in_s32.val[1] = vaddq_s32(in_s32.val[1], result_offset_after_shift_s32);
- in_s32.val[2] = vaddq_s32(in_s32.val[2], result_offset_after_shift_s32);
- in_s32.val[3] = vaddq_s32(in_s32.val[3], result_offset_after_shift_s32);
-
- // Saturate negative values
- in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
- in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
- in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
- in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
-
- // Convert S32 to S16
- const int16x8x2_t in_s16 =
- {
- {
- vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
- vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
- }
- };
-
- // Convert S16 to U8
- uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));
-
- if(is_bounded_relu)
- {
- out_u8 = vmaxq_u8(out_u8, min_u8);
- out_u8 = vminq_u8(out_u8, max_u8);
- }
-
- return out_u8;
-}
+class Coordinates;
/* Function used by the left-over for loop to perform the quantization */
template <bool is_bounded_relu>
@@ -178,11 +132,6 @@ inline uint8_t finalize_quantization(int32x4_t in_s32, int result_fixedpoint_mul
return out_u8;
}
-} // namespace
-
-namespace arm_compute
-{
-class Coordinates;
} // namespace arm_compute
template <bool is_bounded_relu>
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 298101a09d..2d08b45210 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,28 +26,56 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "support/ToolchainSupport.h"
using namespace arm_compute;
NEDepthwiseConvolutionLayer3x3::NEDepthwiseConvolutionLayer3x3()
- : _kernel(), _output_stage_kernel(), _border_handler(), _has_bias(false)
+ : _kernel(), _output_stage_kernel(), _border_handler(), _accumulator(), _has_bias(false), _is_quantized(false)
{
}
void NEDepthwiseConvolutionLayer3x3::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- // Configure kernels
- _kernel.configure(input, weights, output, conv_info);
- _border_handler.configure(input, _kernel.border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
- if(biases != nullptr)
+ PixelValue zero_value(0.f);
+
+ _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+ _has_bias = biases != nullptr;
+
+ // Allocate the intermediate accumulator tensor in case of quantized input
+ if(_is_quantized)
+ {
+ _accumulator.allocator()->init(TensorInfo(output->info()->tensor_shape(), 1, DataType::S32));
+ _accumulator.info()->set_quantization_info(input->info()->quantization_info());
+ zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().offset));
+ }
+
+ // Configure depthwise convolution kernel
+ _kernel.configure(input, weights, (_is_quantized) ? &_accumulator : output, conv_info);
+
+ // Configure border handler
+ _border_handler.configure(input, _kernel.border_size(), BorderMode::CONSTANT, zero_value);
+
+ // Configure biases accumulation
+ if(_has_bias || _is_quantized)
{
- _output_stage_kernel.configure(output, biases);
- _has_bias = true;
+ if(_is_quantized)
+ {
+ float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
+ int output_multiplier, output_shift;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+ _output_stage_kernel.configure(&_accumulator, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
+ _accumulator.allocator()->allocate();
+ }
+ else
+ {
+ _output_stage_kernel.configure(output, biases);
+ }
}
}
@@ -55,7 +83,7 @@ void NEDepthwiseConvolutionLayer3x3::run()
{
NEScheduler::get().schedule(&_border_handler, Window::DimX);
NEScheduler::get().schedule(&_kernel, Window::DimX);
- if(_has_bias)
+ if(_has_bias || _is_quantized)
{
NEScheduler::get().schedule(&_output_stage_kernel, Window::DimX);
}
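
A worked example of the multiplier computation above (values taken from the QuantizationInfo(0.5f, 10) used by the tests below; the encoded constants follow the gemmlowp convention and are approximate):

    // real multiplier = in_scale * w_scale / out_scale = 0.5 * 0.5 / 0.5 = 0.5
    // calculate_quantized_multiplier_less_than_one(0.5, ...) then yields roughly
    // output_multiplier = 1 << 30 and output_shift = 0, since 0.5 = (2^30 / 2^31) * 2^0
    const float multiplier = (0.5f * 0.5f) / 0.5f;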
diff --git a/tests/validation/CL/DepthwiseConvolutionLayer.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
index 92a2773e54..43e04fbf07 100644
--- a/tests/validation/CL/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,7 @@ namespace validation
namespace
{
constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-constexpr RelativeTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QASYMM8 */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QASYMM8 */
} // namespace
TEST_SUITE(CL)
@@ -96,13 +96,13 @@ TEST_SUITE(QASYMM8)
TEST_SUITE(W3x3)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127) })))
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127) })))
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
diff --git a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
index 420c9744d8..e8c771595e 100644
--- a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,8 @@ namespace validation
{
namespace
{
-constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QASYMM8 */
} // namespace
TEST_SUITE(NEON)
@@ -125,6 +126,28 @@ TEST_SUITE_END()
TEST_SUITE_END()
+template <typename T>
+using NEDepthwiseConvolutionLayerQuantizedFixture3x3 = DepthwiseConvolutionLayerValidationQuantizedFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer3x3, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+TEST_SUITE(W3x3)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+TEST_SUITE_END()
+
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index fc48bcec72..df5436fcf7 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,6 +34,8 @@
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/DepthwiseConvolutionLayer.h"
+#include "utils/Utils.h"
+
#include <random>
namespace arm_compute
@@ -82,7 +84,7 @@ protected:
}
case DataType::S32:
{
- std::uniform_int_distribution<int32_t> distribution(-1000, 1000);
+ std::uniform_int_distribution<int32_t> distribution(-100, 100);
library->fill(tensor, distribution, i);
break;
}
@@ -136,7 +138,7 @@ protected:
{
SimpleTensor<T> src{ in_shape, data_type, 1, 0, quantization_info };
SimpleTensor<T> weights{ weights_shape, data_type, 1, 0, quantization_info };
- SimpleTensor<TBias> biases{ biases_shape, data_type, 1, 0, quantization_info };
+ SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, 0, quantization_info };
fill(src, 0);
fill(weights, 1);
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index 08caa8efb8..6ca347f1d4 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -156,17 +156,17 @@ SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, co
{
for(int x = minimum_x; x < input_width + pad_x - filter_half_size; x += conv_info.stride().first)
{
- Coordinates coords(x, y, z);
+ Coordinates coords(x, y, z, r);
int filter_offset = filter_plane * z;
- uint32_t val = 0;
+ int32_t val = 0;
for(int j = y - filter_half_size; j <= (y + filter_half_size); ++j)
{
for(int i = x - filter_half_size; i <= (x + filter_half_size); ++i)
{
coords.set(0, i);
coords.set(1, j);
- auto in_val = tensor_elem_at<uint8_t>(src, coords, BorderMode::CONSTANT, 0);
+ auto in_val = tensor_elem_at<uint8_t>(src, coords, BorderMode::CONSTANT, -input_offset);
uint8_t w_val = *(weights.data() + filter_offset);
val += (in_val + input_offset) * (w_val + weights_offset);
++filter_offset;