diff options
author | Gian Marco <gianmarco.iodice@arm.com> | 2017-11-08 12:24:09 +0000 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:24 +0000 |
commit | e75a02b60736f37c34388c23c0ccee230f65da59 (patch) | |
tree | f8e9423e40589e99bd8be6c1e740b17792e2058e /tests/datasets | |
parent | 6c0348f4cbf6e30a715780f50aebf6dd0a2a8fc3 (diff) | |
download | ComputeLibrary-e75a02b60736f37c34388c23c0ccee230f65da59.tar.gz |
COMPMID-675 - Reworked NEGEMMLowp interface/function
The new interface makes NEGEMMLowp able to work with ASYMM8 data types.
Implemented 2 new functions:
- NEGEMMLowpMatrixMultiplyCore
- NEGEMMLowpOutputStage
These functions should make the integration in Android NN doable
For more information about GEMMLowp:
https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
Change-Id: Ie2c775f45234f68ca53dba644b3a912b997fd890
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95504
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Diffstat (limited to 'tests/datasets')
-rw-r--r-- | tests/datasets/GEMMLowpDataset.h | 36 | ||||
-rw-r--r-- | tests/datasets/LargeGEMMLowpDataset.h | 12 | ||||
-rw-r--r-- | tests/datasets/SmallGEMMLowpDataset.h | 12 |
3 files changed, 19 insertions, 41 deletions
diff --git a/tests/datasets/GEMMLowpDataset.h b/tests/datasets/GEMMLowpDataset.h index 4bf2a98d61..062c05b1d9 100644 --- a/tests/datasets/GEMMLowpDataset.h +++ b/tests/datasets/GEMMLowpDataset.h @@ -37,7 +37,7 @@ namespace datasets class GEMMLowpDataset { public: - using type = std::tuple<TensorShape, TensorShape, TensorShape, int32_t, int32_t, int32_t, int32_t, int32_t>; + using type = std::tuple<TensorShape, TensorShape, TensorShape, int32_t, int32_t>; struct iterator { @@ -45,18 +45,12 @@ public: std::vector<TensorShape>::const_iterator b_it, std::vector<TensorShape>::const_iterator c_it, std::vector<int32_t>::const_iterator a_offset_it, - std::vector<int32_t>::const_iterator b_offset_it, - std::vector<int32_t>::const_iterator c_offset_it, - std::vector<int32_t>::const_iterator c_mult_int_it, - std::vector<int32_t>::const_iterator out_shift_it) + std::vector<int32_t>::const_iterator b_offset_it) : _a_it{ std::move(a_it) }, _b_it{ std::move(b_it) }, _c_it{ std::move(c_it) }, _a_offset_it{ std::move(a_offset_it) }, - _b_offset_it{ std::move(b_offset_it) }, - _c_offset_it{ std::move(c_offset_it) }, - _c_mult_int_it{ std::move(c_mult_int_it) }, - _out_shift_it{ std::move(out_shift_it) } + _b_offset_it{ std::move(b_offset_it) } { } @@ -68,15 +62,12 @@ public: description << "C=" << *_c_it << ":"; description << "a_offset=" << *_a_offset_it << ":"; description << "b_offset=" << *_b_offset_it << ":"; - description << "c_offset=" << *_c_offset_it << ":"; - description << "c_mult_int=" << *_c_mult_int_it << ":"; - description << "out_shift=" << *_out_shift_it << ":"; return description.str(); } GEMMLowpDataset::type operator*() const { - return std::make_tuple(*_a_it, *_b_it, *_c_it, *_a_offset_it, *_b_offset_it, *_c_offset_it, *_c_mult_int_it, *_out_shift_it); + return std::make_tuple(*_a_it, *_b_it, *_c_it, *_a_offset_it, *_b_offset_it); } iterator &operator++() @@ -86,9 +77,6 @@ public: ++_c_it; ++_a_offset_it; ++_b_offset_it; - ++_c_offset_it; - ++_c_mult_int_it; - 
++_out_shift_it; return *this; } @@ -99,32 +87,25 @@ public: std::vector<TensorShape>::const_iterator _c_it; std::vector<int32_t>::const_iterator _a_offset_it; std::vector<int32_t>::const_iterator _b_offset_it; - std::vector<int32_t>::const_iterator _c_offset_it; - std::vector<int32_t>::const_iterator _c_mult_int_it; - std::vector<int32_t>::const_iterator _out_shift_it; }; iterator begin() const { - return iterator(_a_shapes.begin(), _b_shapes.begin(), _c_shapes.begin(), _a_offset.begin(), _b_offset.begin(), _c_offset.begin(), _c_mult_int.begin(), _out_shift.begin()); + return iterator(_a_shapes.begin(), _b_shapes.begin(), _c_shapes.begin(), _a_offset.begin(), _b_offset.begin()); } int size() const { - return std::min(_a_shapes.size(), std::min(_b_shapes.size(), std::min(_c_shapes.size(), std::min(_a_offset.size(), std::min(_b_offset.size(), std::min(_c_offset.size(), std::min(_c_mult_int.size(), - _out_shift.size()))))))); + return std::min(_a_shapes.size(), std::min(_b_shapes.size(), std::min(_c_shapes.size(), std::min(_a_offset.size(), _b_offset.size())))); } - void add_config(TensorShape a, TensorShape b, TensorShape c, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift) + void add_config(TensorShape a, TensorShape b, TensorShape c, int32_t a_offset, int32_t b_offset) { _a_shapes.emplace_back(std::move(a)); _b_shapes.emplace_back(std::move(b)); _c_shapes.emplace_back(std::move(c)); _a_offset.emplace_back(std::move(a_offset)); _b_offset.emplace_back(std::move(b_offset)); - _c_offset.emplace_back(std::move(c_offset)); - _c_mult_int.emplace_back(std::move(c_mult_int)); - _out_shift.emplace_back(std::move(out_shift)); } protected: @@ -137,9 +118,6 @@ private: std::vector<TensorShape> _c_shapes{}; std::vector<int32_t> _a_offset{}; std::vector<int32_t> _b_offset{}; - std::vector<int32_t> _c_offset{}; - std::vector<int32_t> _c_mult_int{}; - std::vector<int32_t> _out_shift{}; }; } // namespace datasets } // namespace test diff 
--git a/tests/datasets/LargeGEMMLowpDataset.h b/tests/datasets/LargeGEMMLowpDataset.h index 10f79e423d..cc1feb49a2 100644 --- a/tests/datasets/LargeGEMMLowpDataset.h +++ b/tests/datasets/LargeGEMMLowpDataset.h @@ -42,12 +42,12 @@ class LargeGEMMLowpDataset final : public GEMMLowpDataset public: LargeGEMMLowpDataset() { - add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0, 0, 1, 0); - add_config(TensorShape(873U, 513U), TensorShape(784U, 873U), TensorShape(784U, 513U), 0, 4, 3, 2, 0); - add_config(TensorShape(697U, 872U), TensorShape(563U, 697U), TensorShape(563U, 872U), -2, 0, 1, 1, 0); - add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13, -6, 2, 2); - add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2, 8, 4, 3); - add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1, -3, 3, 1); + add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0); + add_config(TensorShape(873U, 513U), TensorShape(784U, 873U), TensorShape(784U, 513U), 0, 4); + add_config(TensorShape(697U, 872U), TensorShape(563U, 697U), TensorShape(563U, 872U), -2, 0); + add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13); + add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2); + add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1); } }; } // namespace datasets diff --git a/tests/datasets/SmallGEMMLowpDataset.h b/tests/datasets/SmallGEMMLowpDataset.h index b7fe3907ad..881546e70f 100644 --- a/tests/datasets/SmallGEMMLowpDataset.h +++ b/tests/datasets/SmallGEMMLowpDataset.h @@ -42,12 +42,12 @@ class SmallGEMMLowpDataset final : public GEMMLowpDataset public: SmallGEMMLowpDataset() { - add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0, 0, 1, 0); - 
add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4, 3, 2, 0); - add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0, 1, 1, 0); - add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 5, 13, -6, 2, 2); - add_config(TensorShape(38U, 12U), TensorShape(21U, 38U), TensorShape(21U, 12U), -3, -2, 8, 4, 3); - add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1, -3, 3, 1); + add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0); + add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4); + add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0); + add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 5, 13); + add_config(TensorShape(38U, 12U), TensorShape(21U, 38U), TensorShape(21U, 12U), -3, -2); + add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1); } }; } // namespace datasets |