| author | Chunosov <N.Chunosov@yandex.ru> | 2017-11-03 17:33:15 +0700 |
|---|---|---|
| committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:24 +0000 |
| commit | d621bca4e963555a99be4328c8d49d1813789649 (patch) | |
| tree | 59503f9d4cdbaafefdba5a2569bf3d88082ad09d /tests | |
| parent | 5a99ddf2dcf3a5eb49ea85cb8bcc6a43f1496e5e (diff) | |
| download | ComputeLibrary-d621bca4e963555a99be4328c8d49d1813789649.tar.gz | |
COMPMID-661: directconv-uint8 (#20)
Change-Id: I84f7a1ce3658be0d3c91e65096467258af48f0b6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94341
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests')
-rw-r--r-- | tests/CL/CLAccessor.h | 24
-rw-r--r-- | tests/IAccessor.h | 3
-rw-r--r-- | tests/NEON/Accessor.h | 24
-rw-r--r-- | tests/SimpleTensor.h | 38
-rw-r--r-- | tests/Utils.h | 11
-rw-r--r-- | tests/validation/CL/DirectConvolutionLayer.cpp | 46
-rw-r--r-- | tests/validation/CPP/ConvolutionLayer.cpp | 118
-rw-r--r-- | tests/validation/CPP/UtilsQuantizedAsymm.h | 57
-rw-r--r-- | tests/validation/fixtures/DirectConvolutionLayerFixture.h | 75
9 files changed, 309 insertions, 87 deletions
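
The core of the patch is the new QASYMM8 reference path in tests/validation/CPP/ConvolutionLayer.cpp: offset-corrected uint8 inputs and weights are multiplied and accumulated in int32, the bias is added, and the accumulator is requantized with a fixed-point multiplier and shift derived from input_scale * weights_scale / output_scale via calculate_quantized_multiplier_less_than_one(). The sketch below restates that requantization step as a standalone snippet, reusing the arithmetic of the two helpers added in tests/validation/CPP/UtilsQuantizedAsymm.h; the free-standing function names and the requantize() wrapper are illustrative, not part of the patch.

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

// Rounded-to-nearest division by a power of two
// (same arithmetic as asymm_rounding_divide_by_pow2 in the patch).
int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + ((x & mask) > threshold ? 1 : 0);
}

// Saturating rounding doubling high multiply
// (same arithmetic as asymm_int_mult in the patch).
int32_t saturating_doubling_high_mul(int32_t a, int32_t b)
{
    const bool    overflow = (a == b) && (a == std::numeric_limits<int32_t>::min());
    const int64_t ab_64    = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int32_t nudge    = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30));
    const int32_t high32   = static_cast<int32_t>((ab_64 + nudge) / (1ll << 31));
    return overflow ? std::numeric_limits<int32_t>::max() : high32;
}

// Requantize an int32 accumulator to QASYMM8, as done at the end of the new
// convolution3d<uint8_t> reference. output_multiplier / output_shift are assumed
// to have been precomputed from input_scale * weights_scale / output_scale.
uint8_t requantize(int32_t acc, int32_t output_multiplier, int output_shift, int32_t output_offset)
{
    int32_t v = rounding_divide_by_pow2(saturating_doubling_high_mul(acc, output_multiplier), output_shift);
    v += output_offset;
    return static_cast<uint8_t>(std::min<int32_t>(std::max<int32_t>(v, 0), 255));
}
```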
diff --git a/tests/CL/CLAccessor.h b/tests/CL/CLAccessor.h index 2f955653c8..9e7b73f34f 100644 --- a/tests/CL/CLAccessor.h +++ b/tests/CL/CLAccessor.h @@ -52,15 +52,16 @@ public: /** Destructor that unmaps the CL memory. */ ~CLAccessor(); - TensorShape shape() const override; - size_t element_size() const override; - size_t size() const override; - Format format() const override; - DataType data_type() const override; - int num_channels() const override; - int num_elements() const override; - PaddingSize padding() const override; - int fixed_point_position() const override; + TensorShape shape() const override; + size_t element_size() const override; + size_t size() const override; + Format format() const override; + DataType data_type() const override; + int num_channels() const override; + int num_elements() const override; + PaddingSize padding() const override; + int fixed_point_position() const override; + QuantizationInfo quantization_info() const override; const void *operator()(const Coordinates &coord) const override; void *operator()(const Coordinates &coord) override; const void *data() const; @@ -126,6 +127,11 @@ inline int CLAccessor::fixed_point_position() const return _tensor.info()->fixed_point_position(); } +inline QuantizationInfo CLAccessor::quantization_info() const +{ + return _tensor.info()->quantization_info(); +} + inline const void *CLAccessor::data() const { return _tensor.buffer(); diff --git a/tests/IAccessor.h b/tests/IAccessor.h index ef06e9e9da..3744fc8c02 100644 --- a/tests/IAccessor.h +++ b/tests/IAccessor.h @@ -67,6 +67,9 @@ public: /** Number of bits for the fractional part. */ virtual int fixed_point_position() const = 0; + /** Quantization info in case of asymmetric quantized type */ + virtual QuantizationInfo quantization_info() const = 0; + /** Read only access to the specified element. * * @param[in] coord Coordinates of the desired element. 
diff --git a/tests/NEON/Accessor.h b/tests/NEON/Accessor.h index e0ff35231c..2bad53b3fe 100644 --- a/tests/NEON/Accessor.h +++ b/tests/NEON/Accessor.h @@ -46,15 +46,16 @@ public: Accessor(Accessor &&) = default; Accessor &operator=(Accessor &&) = default; - TensorShape shape() const override; - size_t element_size() const override; - size_t size() const override; - Format format() const override; - DataType data_type() const override; - int num_channels() const override; - int num_elements() const override; - PaddingSize padding() const override; - int fixed_point_position() const override; + TensorShape shape() const override; + size_t element_size() const override; + size_t size() const override; + Format format() const override; + DataType data_type() const override; + int num_channels() const override; + int num_elements() const override; + PaddingSize padding() const override; + int fixed_point_position() const override; + QuantizationInfo quantization_info() const override; const void *operator()(const Coordinates &coord) const override; void *operator()(const Coordinates &coord) override; const void *data() const; @@ -114,6 +115,11 @@ inline int Accessor::fixed_point_position() const return _tensor.info()->fixed_point_position(); } +inline QuantizationInfo Accessor::quantization_info() const +{ + return _tensor.info()->quantization_info(); +} + inline const void *Accessor::data() const { return _tensor.buffer(); diff --git a/tests/SimpleTensor.h b/tests/SimpleTensor.h index 0f79a3899a..6091991e66 100644 --- a/tests/SimpleTensor.h +++ b/tests/SimpleTensor.h @@ -76,8 +76,11 @@ public: * @param[in] data_type Data type of the new raw tensor. * @param[in] num_channels (Optional) Number of channels (default = 1). * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers (default = 0). + * @param[in] quantization_info (Optional) Quantization info for asymmetric quantization (default = empty). */ - SimpleTensor(TensorShape shape, DataType data_type, int num_channels = 1, int fixed_point_position = 0); + SimpleTensor(TensorShape shape, DataType data_type, + int num_channels = 1, + int fixed_point_position = 0, QuantizationInfo quantization_info = QuantizationInfo()); /** Create a deep copy of the given @p tensor. * @@ -137,6 +140,9 @@ public: /** The number of bits for the fractional part of the fixed point numbers. */ int fixed_point_position() const override; + /** Quantization info in case of asymmetric quantized type */ + QuantizationInfo quantization_info() const override; + /** Constant pointer to the underlying buffer. 
*/ const T *data() const; @@ -168,12 +174,13 @@ public: friend void swap(SimpleTensor<U> &tensor1, SimpleTensor<U> &tensor2); protected: - Buffer _buffer{ nullptr }; - TensorShape _shape{}; - Format _format{ Format::UNKNOWN }; - DataType _data_type{ DataType::UNKNOWN }; - int _num_channels{ 0 }; - int _fixed_point_position{ 0 }; + Buffer _buffer{ nullptr }; + TensorShape _shape{}; + Format _format{ Format::UNKNOWN }; + DataType _data_type{ DataType::UNKNOWN }; + int _num_channels{ 0 }; + int _fixed_point_position{ 0 }; + QuantizationInfo _quantization_info{}; }; template <typename T> @@ -181,18 +188,20 @@ SimpleTensor<T>::SimpleTensor(TensorShape shape, Format format, int fixed_point_ : _buffer(nullptr), _shape(shape), _format(format), - _fixed_point_position(fixed_point_position) + _fixed_point_position(fixed_point_position), + _quantization_info() { _buffer = support::cpp14::make_unique<T[]>(num_elements() * num_channels()); } template <typename T> -SimpleTensor<T>::SimpleTensor(TensorShape shape, DataType data_type, int num_channels, int fixed_point_position) +SimpleTensor<T>::SimpleTensor(TensorShape shape, DataType data_type, int num_channels, int fixed_point_position, QuantizationInfo quantization_info) : _buffer(nullptr), _shape(shape), _data_type(data_type), _num_channels(num_channels), - _fixed_point_position(fixed_point_position) + _fixed_point_position(fixed_point_position), + _quantization_info(quantization_info) { _buffer = support::cpp14::make_unique<T[]>(num_elements() * this->num_channels()); } @@ -204,7 +213,8 @@ SimpleTensor<T>::SimpleTensor(const SimpleTensor &tensor) _format(tensor.format()), _data_type(tensor.data_type()), _num_channels(tensor.num_channels()), - _fixed_point_position(tensor.fixed_point_position()) + _fixed_point_position(tensor.fixed_point_position()), + _quantization_info(tensor.quantization_info()) { _buffer = support::cpp14::make_unique<T[]>(tensor.num_elements() * num_channels()); std::copy_n(tensor.data(), num_elements() * num_channels(), _buffer.get()); @@ -249,6 +259,12 @@ int SimpleTensor<T>::fixed_point_position() const } template <typename T> +QuantizationInfo SimpleTensor<T>::quantization_info() const +{ + return _quantization_info; +} + +template <typename T> size_t SimpleTensor<T>::size() const { const size_t size = std::accumulate(_shape.cbegin(), _shape.cend(), 1, std::multiplies<size_t>()); diff --git a/tests/Utils.h b/tests/Utils.h index 465cba88ab..70def45ec7 100644 --- a/tests/Utils.h +++ b/tests/Utils.h @@ -230,6 +230,7 @@ void store_value_with_data_type(void *ptr, T value, DataType data_type) switch(data_type) { case DataType::U8: + case DataType::QASYMM8: *reinterpret_cast<uint8_t *>(ptr) = value; break; case DataType::S8: @@ -385,14 +386,18 @@ inline bool is_in_valid_region(const ValidRegion &valid_region, Coordinates coor * @param[in] data_type Data type. * @param[in] num_channels (Optional) Number of channels. * @param[in] fixed_point_position (Optional) Number of fractional bits. + * @param[in] quantization_info (Optional) Quantization info for asymmetric quantized types. * * @return Initialized tensor of given type. 
*/ template <typename T> -inline T create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1, int fixed_point_position = 0) +inline T create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1, + int fixed_point_position = 0, QuantizationInfo quantization_info = QuantizationInfo()) { - T tensor; - tensor.allocator()->init(TensorInfo(shape, num_channels, data_type, fixed_point_position)); + T tensor; + TensorInfo info(shape, num_channels, data_type, fixed_point_position); + info.set_quantization_info(quantization_info); + tensor.allocator()->init(info); return tensor; } diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp index 25e881f4ce..2986369d9b 100644 --- a/tests/validation/CL/DirectConvolutionLayer.cpp +++ b/tests/validation/CL/DirectConvolutionLayer.cpp @@ -47,21 +47,11 @@ RelativeTolerance<half> tolerance_fp16(half(0.2)); /**< Tolerance for floating RelativeTolerance<float> tolerance_fp32(0.02f); /**< Tolerance for floating point tests */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ -constexpr AbsoluteTolerance<int8_t> tolerance_qs8(0); /**< Tolerance for fixed point tests */ -constexpr AbsoluteTolerance<int16_t> tolerance_qs16(0); /**< Tolerance for fixed point tests */ +constexpr AbsoluteTolerance<int8_t> tolerance_qs8(0); /**< Tolerance for fixed point tests */ +constexpr AbsoluteTolerance<int16_t> tolerance_qs16(0); /**< Tolerance for fixed point tests */ +constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance for quantized tests */ /** Direct convolution data set. */ -const auto data_quantized = combine(datasets::SmallDirectConvolutionShapes(), - combine(framework::dataset::make("StrideX", 1, 3), - combine(framework::dataset::make("StrideY", 1, 3), - combine(concat(combine(framework::dataset::make("PadX", 0), - combine(framework::dataset::make("PadY", 0), - framework::dataset::make("KernelSize", 1))), - combine(framework::dataset::make("PadX", 0, 2), - combine(framework::dataset::make("PadY", 0, 2), - framework::dataset::make("KernelSize", { 3 })))), - framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); - const auto data = combine(datasets::SmallDirectConvolutionShapes(), combine(framework::dataset::make("StrideX", 1, 3), combine(framework::dataset::make("StrideY", 1, 3), @@ -72,6 +62,16 @@ const auto data = combine(datasets::SmallDirectConvolutionShapes(), combine(framework::dataset::make("PadY", 0, 2), framework::dataset::make("KernelSize", { 3, 5 })))), framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); +const auto data_fixed_point = combine(datasets::SmallDirectConvolutionShapes(), + combine(framework::dataset::make("StrideX", 1, 3), + combine(framework::dataset::make("StrideY", 1, 3), + combine(concat(combine(framework::dataset::make("PadX", 0), + combine(framework::dataset::make("PadY", 0), + framework::dataset::make("KernelSize", 1))), + combine(framework::dataset::make("PadX", 0, 2), + combine(framework::dataset::make("PadY", 0, 2), + framework::dataset::make("KernelSize", { 3 })))), + framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); } // namespace TEST_SUITE(CL) @@ -103,9 +103,9 @@ TEST_SUITE_END() template <typename T> using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>; -TEST_SUITE(Quantized) +TEST_SUITE(FixedPoint) TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(Run, 
CLDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(data_quantized, framework::dataset::make("DataType", DataType::QS8)), +FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(data_fixed_point, framework::dataset::make("DataType", DataType::QS8)), framework::dataset::make("FractionalBits", 2, 7))) { // Validate output @@ -114,7 +114,7 @@ FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int8_t>, f TEST_SUITE_END() TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(data_quantized, framework::dataset::make("DataType", DataType::QS16)), +FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(data_fixed_point, framework::dataset::make("DataType", DataType::QS16)), framework::dataset::make("FractionalBits", 2, 15))) { // Validate output @@ -123,6 +123,20 @@ FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int16_t>, TEST_SUITE_END() TEST_SUITE_END() +template <typename T> +using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>; + +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(data, framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() +TEST_SUITE_END() + TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CPP/ConvolutionLayer.cpp b/tests/validation/CPP/ConvolutionLayer.cpp index ab3690a493..aa73869a0e 100644 --- a/tests/validation/CPP/ConvolutionLayer.cpp +++ b/tests/validation/CPP/ConvolutionLayer.cpp @@ -23,11 +23,15 @@ */ #include "ConvolutionLayer.h" +#include "tests/validation/CPP/Utils.h" +#include "tests/validation/CPP/UtilsQuantizedAsymm.h" #include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" #include "tests/framework/Asserts.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" + namespace arm_compute { namespace test @@ -45,9 +49,14 @@ inline bool is_valid_pixel(int i, int min, int max) // 3D convolution for floating point type template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0> -void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int fixed_point_position) +void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<T> &bias, SimpleTensor<T> &out, + int i_offset, int w_offset, int b_offset, int o_offset, + int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights) { - ARM_COMPUTE_UNUSED(fixed_point_position); + const T *in_ptr = in.data() + i_offset; + const T *w_ptr = weights.data() + w_offset; + const T *b_ptr = bias.data() + b_offset; + T *out_ptr = out.data() + o_offset; const int half_width_weights = width_weights / 2; const int half_height_weights = height_weights / 2; @@ -72,8 +81,8 @@ void convolution3d(const T *in, const T *weights, const T *bias, T *out, 
int xi, const int idx = xk + half_width_weights; const int idy = yk + half_height_weights; - const T i_value = in[offset_slice_in + xk + yk * width_in]; - const T w_value = weights[idx + idy * width_weights + ifm * width_weights * height_weights]; + const T i_value = in_ptr[offset_slice_in + xk + yk * width_in]; + const T w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights]; acc += i_value * w_value; } @@ -82,14 +91,21 @@ void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, } // Accumulate the bias and store the result - *out = acc + (*bias); + *out_ptr = acc + (*b_ptr); } // 3D convolution for fixed point type template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0> -void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, - int fixed_point_position) +void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<T> &bias, SimpleTensor<T> &out, + int i_offset, int w_offset, int b_offset, int o_offset, + int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights) { + const T *in_ptr = in.data() + i_offset; + const T *w_ptr = weights.data() + w_offset; + const T *b_ptr = bias.data() + b_offset; + T *out_ptr = out.data() + o_offset; + int fixed_point_position = in.fixed_point_position(); + const int half_width_weights = width_weights / 2; const int half_height_weights = height_weights / 2; @@ -116,8 +132,8 @@ void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, const int idx = xk + half_width_weights; const int idy = yk + half_height_weights; - const fixed_point<promoted_type> i_value(in[offset_slice_in + xk + yk * width_in], fixed_point_position, true); - const fixed_point<promoted_type> w_value(weights[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true); + const fixed_point<promoted_type> i_value(in_ptr[offset_slice_in + xk + yk * width_in], fixed_point_position, true); + const fixed_point<promoted_type> w_value(w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true); const fixed_point<promoted_type> iw = i_value * w_value; acc = iw + acc; } @@ -126,12 +142,79 @@ void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, } // Get the bias - const fixed_point<promoted_type> b(*bias, fixed_point_position, true); + const fixed_point<promoted_type> b(*b_ptr, fixed_point_position, true); // Accumulate the bias and covert back acc = acc + b; fixed_point<T> res(acc); - *out = res.raw(); + *out_ptr = res.raw(); +} + +// 3D convolution for QASYMM8 type +template <> +void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<uint8_t> &bias, SimpleTensor<uint8_t> &out, + int i_offset, int w_offset, int b_offset, int o_offset, + int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights) +{ + const uint8_t *in_ptr = in.data() + i_offset; + const uint8_t *w_ptr = weights.data() + w_offset; + const uint8_t *b_ptr = bias.data() + b_offset; + uint8_t *out_ptr = out.data() + o_offset; + + const int input_offset = -in.quantization_info().offset; + const float input_scale = in.quantization_info().scale; + const int weights_offset = -weights.quantization_info().offset; + const float weights_scale = 
weights.quantization_info().scale; + const int output_offset = out.quantization_info().offset; + const float output_scale = out.quantization_info().scale; + + int output_multiplier = 0; + int output_shift = 0; + const float multiplier = input_scale * weights_scale / output_scale; + arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); + + const int half_width_weights = width_weights / 2; + const int half_height_weights = height_weights / 2; + + // Reset accumulator + int32_t acc(0); + + // Compute a 2D convolution for each IFM and accumulate the result + for(int ifm = 0; ifm < depth_in; ++ifm) + { + // Compute the offset for the input slice + const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in; + + // Compute 2D convolution + for(int yk = -half_height_weights; yk <= half_height_weights; ++yk) + { + for(int xk = -half_width_weights; xk <= half_width_weights; ++xk) + { + // Check if the pixel is out-of-bound + if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in)) + { + const int idx = xk + half_width_weights; + const int idy = yk + half_height_weights; + + const uint8_t i_value = in_ptr[offset_slice_in + xk + yk * width_in]; + const uint8_t w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights]; + + acc += (i_value + input_offset) * (w_value + weights_offset); + } + } + } + } + + // Accumulate the bias + acc += (*b_ptr); + + acc = asymm_rounding_divide_by_pow2(asymm_int_mult(acc, output_multiplier), output_shift); + acc += output_offset; + acc = std::max<int32_t>(acc, 0); + acc = std::min<int32_t>(acc, 255); + + // Store the result + *out_ptr = acc; } } // namespace @@ -139,7 +222,7 @@ template <typename T> SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<T> &bias, const TensorShape &output_shape, const PadStrideInfo &info) { // Create reference - SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position() }; + SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() }; // Compute reference const int width_in = src.shape().x(); @@ -182,14 +265,11 @@ SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor ARM_COMPUTE_ASSERT(yo < height_out); // Compute 3D convolution - convolution3d(src.data() + offset_in, - weights.data() + ofm * width_weights * height_weights * depth_weights, - bias.data() + ofm, - dst.data() + offset_out, + convolution3d(src, weights, bias, dst, + offset_in, ofm * width_weights * height_weights * depth_weights, ofm, offset_out, xi, yi, width_in, height_in, depth_in, - width_weights, height_weights, - src.fixed_point_position()); + width_weights, height_weights); } } } @@ -206,6 +286,8 @@ template SimpleTensor<qint8_t> convolution_layer(const SimpleTensor<qint8_t> &sr const PadStrideInfo &info); template SimpleTensor<qint16_t> convolution_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &output_shape, const PadStrideInfo &info); +template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<uint8_t> &bias, const TensorShape &output_shape, + const PadStrideInfo &info); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/CPP/UtilsQuantizedAsymm.h 
b/tests/validation/CPP/UtilsQuantizedAsymm.h new file mode 100644 index 0000000000..b7b69d588a --- /dev/null +++ b/tests/validation/CPP/UtilsQuantizedAsymm.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_TEST_VALIDATION_UTILS_QUANTIZED_ASYMM_H__ +#define __ARM_COMPUTE_TEST_VALIDATION_UTILS_QUANTIZED_ASYMM_H__ + +#include <cstdint> + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/** Rounded to nearest division by a power-of-two. */ +inline int32_t asymm_rounding_divide_by_pow2(int32_t x, int exponent) +{ + const int32_t mask = (1 << exponent) - 1; + const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0); + return (x >> exponent) + ((x & mask) > threshold ? 1 : 0); +} + +/** Multiplication of two integers. The same as ARMv7 NEON VQRDMULH instruction. */ +inline int32_t asymm_int_mult(int32_t a, int32_t b) +{ + bool overflow = a == b && a == std::numeric_limits<int32_t>::min(); + int64_t a_64(a); + int64_t b_64(b); + int64_t ab_64 = a_64 * b_64; + int32_t nudge = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30)); + int32_t ab_x2_high32 = static_cast<int32_t>((ab_64 + nudge) / (1ll << 31)); + return overflow ? 
std::numeric_limits<int32_t>::max() : ab_x2_high32; +} +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* __ARM_COMPUTE_TEST_VALIDATION_UTILS_QUANTIZED_ASYMM_H__ */ diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h index a709157c7b..e302657158 100644 --- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h @@ -41,22 +41,24 @@ namespace test namespace validation { template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DirectConvolutionValidationFixedPointFixture : public framework::Fixture +class DirectConvolutionValidationGenericFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits) + void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, + DataType data_type, int fractional_bits, QuantizationInfo quantization_info) { - _fractional_bits = fractional_bits; - _data_type = data_type; + _fractional_bits = fractional_bits; + _quantization_info = quantization_info; + _data_type = data_type; const TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels); const TensorShape bias_shape(num_kernels); const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR); const TensorShape output_shape = get_output_shape(input_shape, weights_shape, info); - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits, quantization_info); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits, quantization_info); } protected: @@ -65,6 +67,12 @@ protected: { switch(tensor.data_type()) { + case DataType::QASYMM8: + { + std::uniform_int_distribution<uint8_t> distribution(0, 10); + library->fill(tensor, distribution, i); + break; + } case DataType::F16: case DataType::F32: { @@ -78,13 +86,13 @@ protected: } TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, int fixed_point_position) + DataType data_type, int fixed_point_position, QuantizationInfo quantization_info) { // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position); - TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info); + TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, 
fixed_point_position, quantization_info); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info); // Create and configure function FunctionType conv; @@ -118,12 +126,12 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, int fixed_point_position) + DataType data_type, int fixed_point_position, QuantizationInfo quantization_info) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position }; - SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position }; - SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position }; + SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position, quantization_info }; // Fill reference fill(src, 0); @@ -133,10 +141,11 @@ protected: return reference::convolution_layer<T>(src, weights, bias, output_shape, info); } - TensorType _target{}; - SimpleTensor<T> _reference{}; - int _fractional_bits{}; - DataType _data_type{}; + TensorType _target{}; + SimpleTensor<T> _reference{}; + int _fractional_bits{}; + QuantizationInfo _quantization_info{}; + DataType _data_type{}; private: TensorShape get_output_shape(TensorShape in_shape, TensorShape kernel_shape, const PadStrideInfo &info) @@ -155,15 +164,39 @@ private: }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DirectConvolutionValidationFixture : public DirectConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T> +class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: template <typename...> void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type) { - DirectConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0); + DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, QuantizationInfo()); } }; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DirectConvolutionValidationFixedPointFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + template <typename...> + void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits) + { + DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, fractional_bits, + QuantizationInfo()); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + template 
<typename...> + void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info) + { + DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, quantization_info); + } +}; + } // namespace validation } // namespace test } // namespace arm_compute |
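
The reference obtains output_multiplier and output_shift from arm_compute::quantization::calculate_quantized_multiplier_less_than_one(). The underlying idea is to split a real multiplier M in [0, 1) into a Q0.31 integer multiplier plus a right shift, so that M * x can then be evaluated with the integer helpers sketched above. The snippet below is a hypothetical illustration of that decomposition; the function name and signature are assumptions, not the library's implementation.

```cpp
#include <cmath>
#include <cstdint>

// Decompose a real multiplier M in [0, 1) into a Q0.31 fixed-point multiplier and a
// right shift, so that
//   M * x  ~=  rounding_divide_by_pow2(saturating_doubling_high_mul(x, quantized_multiplier), right_shift).
// Illustrative sketch only; not the library's exact implementation.
void quantize_multiplier_less_than_one(float multiplier, int32_t *quantized_multiplier, int *right_shift)
{
    int exponent = 0;
    const double q = std::frexp(multiplier, &exponent); // multiplier = q * 2^exponent, q in [0.5, 1)
    *right_shift = -exponent;

    int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
    if(q_fixed == (1ll << 31)) // rounding pushed q up to 1.0: renormalize
    {
        q_fixed /= 2;
        --*right_shift;
    }
    *quantized_multiplier = static_cast<int32_t>(q_fixed);
}
```

With the quantization used in the new CL test (scale 2/255 for input, weights and output), M = 2/255 ≈ 0.0078, which decomposes into a multiplier close to 0.5 * 2^31 and a right shift of 6.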