author     Gian Marco <gianmarco.iodice@arm.com>        2017-11-08 12:24:09 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit     e75a02b60736f37c34388c23c0ccee230f65da59 (patch)
tree       f8e9423e40589e99bd8be6c1e740b17792e2058e /tests
parent     6c0348f4cbf6e30a715780f50aebf6dd0a2a8fc3 (diff)
COMPMID-675 - Reworked NEGEMMLowp interface/function
The new interface makes NEGEMMLowp able to work with ASYMM8 data types.

Implemented 2 new functions:
- NEGEMMLowpMatrixMultiplyCore
- NEGEMMLowpOutputStage

These functions should make the integration in Android NN doable.

For more information about GEMMLowp:
https://github.com/google/gemmlowp/blob/master/doc/low-precision.md

Change-Id: Ie2c775f45234f68ca53dba644b3a912b997fd890
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95504
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
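For context, a minimal usage sketch of the reworked two-stage interface, pieced together from the calls the tests in this patch exercise. The shapes match the first SmallGEMMLowpDataset config; the offsets and output-stage parameters are illustrative values, not taken from the diff:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "arm_compute/runtime/TensorAllocator.h"

    using namespace arm_compute;

    int main()
    {
        Tensor a, b, c, out;

        // Stage 1 operands are QASYMM8; the accumulator tensor is S32.
        a.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8));
        b.allocator()->init(TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8));
        c.allocator()->init(TensorInfo(TensorShape(33U, 13U), 1, DataType::S32));
        out.allocator()->init(TensorInfo(TensorShape(33U, 13U), 1, DataType::QASYMM8));

        // The quantization offsets now travel with the tensors instead of
        // being passed to configure() as in the old NEGEMMLowp interface.
        // Offsets 2 and 3 are placeholders.
        a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, 2));
        b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, 3));

        // Stage 1: 32-bit accumulation of (a + a_offset) * (b + b_offset).
        NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
        gemmlowp_mm.configure(&a, &b, &c);

        // Stage 2: requantize the S32 accumulators down to QASYMM8
        // (result_offset = -2, result_mult_int = 1, result_shift = 2).
        NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
        output_stage.configure(&c, &out, -2, 1, 2);

        a.allocator()->allocate();
        b.allocator()->allocate();
        c.allocator()->allocate();
        out.allocator()->allocate();

        // ... fill a and b with quantized input data ...

        gemmlowp_mm.run();
        output_stage.run();
        return 0;
    }

Splitting the S32 accumulation from the requantization mirrors the pipeline described in the gemmlowp documentation linked above, which is what lets a caller such as Android NN plug in its own output stage.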
Diffstat (limited to 'tests')
-rw-r--r--  tests/benchmark/NEON/GEMMLowp.cpp              1
-rw-r--r--  tests/datasets/GEMMLowpDataset.h              36
-rw-r--r--  tests/datasets/LargeGEMMLowpDataset.h         12
-rw-r--r--  tests/datasets/SmallGEMMLowpDataset.h         12
-rw-r--r--  tests/validation/CPP/GEMMLowp.cpp             67
-rw-r--r--  tests/validation/CPP/GEMMLowp.h                6
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp            93
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h   94
8 files changed, 155 insertions, 166 deletions
diff --git a/tests/benchmark/NEON/GEMMLowp.cpp b/tests/benchmark/NEON/GEMMLowp.cpp
index 8cf143393d..a0e5e694bd 100644
--- a/tests/benchmark/NEON/GEMMLowp.cpp
+++ b/tests/benchmark/NEON/GEMMLowp.cpp
@@ -23,7 +23,6 @@
*/
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
diff --git a/tests/datasets/GEMMLowpDataset.h b/tests/datasets/GEMMLowpDataset.h
index 4bf2a98d61..062c05b1d9 100644
--- a/tests/datasets/GEMMLowpDataset.h
+++ b/tests/datasets/GEMMLowpDataset.h
@@ -37,7 +37,7 @@ namespace datasets
class GEMMLowpDataset
{
public:
- using type = std::tuple<TensorShape, TensorShape, TensorShape, int32_t, int32_t, int32_t, int32_t, int32_t>;
+ using type = std::tuple<TensorShape, TensorShape, TensorShape, int32_t, int32_t>;
struct iterator
{
@@ -45,18 +45,12 @@ public:
std::vector<TensorShape>::const_iterator b_it,
std::vector<TensorShape>::const_iterator c_it,
std::vector<int32_t>::const_iterator a_offset_it,
- std::vector<int32_t>::const_iterator b_offset_it,
- std::vector<int32_t>::const_iterator c_offset_it,
- std::vector<int32_t>::const_iterator c_mult_int_it,
- std::vector<int32_t>::const_iterator out_shift_it)
+ std::vector<int32_t>::const_iterator b_offset_it)
: _a_it{ std::move(a_it) },
_b_it{ std::move(b_it) },
_c_it{ std::move(c_it) },
_a_offset_it{ std::move(a_offset_it) },
- _b_offset_it{ std::move(b_offset_it) },
- _c_offset_it{ std::move(c_offset_it) },
- _c_mult_int_it{ std::move(c_mult_int_it) },
- _out_shift_it{ std::move(out_shift_it) }
+ _b_offset_it{ std::move(b_offset_it) }
{
}
@@ -68,15 +62,12 @@ public:
description << "C=" << *_c_it << ":";
description << "a_offset=" << *_a_offset_it << ":";
description << "b_offset=" << *_b_offset_it << ":";
- description << "c_offset=" << *_c_offset_it << ":";
- description << "c_mult_int=" << *_c_mult_int_it << ":";
- description << "out_shift=" << *_out_shift_it << ":";
return description.str();
}
GEMMLowpDataset::type operator*() const
{
- return std::make_tuple(*_a_it, *_b_it, *_c_it, *_a_offset_it, *_b_offset_it, *_c_offset_it, *_c_mult_int_it, *_out_shift_it);
+ return std::make_tuple(*_a_it, *_b_it, *_c_it, *_a_offset_it, *_b_offset_it);
}
iterator &operator++()
@@ -86,9 +77,6 @@ public:
++_c_it;
++_a_offset_it;
++_b_offset_it;
- ++_c_offset_it;
- ++_c_mult_int_it;
- ++_out_shift_it;
return *this;
}
@@ -99,32 +87,25 @@ public:
std::vector<TensorShape>::const_iterator _c_it;
std::vector<int32_t>::const_iterator _a_offset_it;
std::vector<int32_t>::const_iterator _b_offset_it;
- std::vector<int32_t>::const_iterator _c_offset_it;
- std::vector<int32_t>::const_iterator _c_mult_int_it;
- std::vector<int32_t>::const_iterator _out_shift_it;
};
iterator begin() const
{
- return iterator(_a_shapes.begin(), _b_shapes.begin(), _c_shapes.begin(), _a_offset.begin(), _b_offset.begin(), _c_offset.begin(), _c_mult_int.begin(), _out_shift.begin());
+ return iterator(_a_shapes.begin(), _b_shapes.begin(), _c_shapes.begin(), _a_offset.begin(), _b_offset.begin());
}
int size() const
{
- return std::min(_a_shapes.size(), std::min(_b_shapes.size(), std::min(_c_shapes.size(), std::min(_a_offset.size(), std::min(_b_offset.size(), std::min(_c_offset.size(), std::min(_c_mult_int.size(),
- _out_shift.size())))))));
+ return std::min(_a_shapes.size(), std::min(_b_shapes.size(), std::min(_c_shapes.size(), std::min(_a_offset.size(), _b_offset.size()))));
}
- void add_config(TensorShape a, TensorShape b, TensorShape c, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ void add_config(TensorShape a, TensorShape b, TensorShape c, int32_t a_offset, int32_t b_offset)
{
_a_shapes.emplace_back(std::move(a));
_b_shapes.emplace_back(std::move(b));
_c_shapes.emplace_back(std::move(c));
_a_offset.emplace_back(std::move(a_offset));
_b_offset.emplace_back(std::move(b_offset));
- _c_offset.emplace_back(std::move(c_offset));
- _c_mult_int.emplace_back(std::move(c_mult_int));
- _out_shift.emplace_back(std::move(out_shift));
}
protected:
@@ -137,9 +118,6 @@ private:
std::vector<TensorShape> _c_shapes{};
std::vector<int32_t> _a_offset{};
std::vector<int32_t> _b_offset{};
- std::vector<int32_t> _c_offset{};
- std::vector<int32_t> _c_mult_int{};
- std::vector<int32_t> _out_shift{};
};
} // namespace datasets
} // namespace test
diff --git a/tests/datasets/LargeGEMMLowpDataset.h b/tests/datasets/LargeGEMMLowpDataset.h
index 10f79e423d..cc1feb49a2 100644
--- a/tests/datasets/LargeGEMMLowpDataset.h
+++ b/tests/datasets/LargeGEMMLowpDataset.h
@@ -42,12 +42,12 @@ class LargeGEMMLowpDataset final : public GEMMLowpDataset
public:
LargeGEMMLowpDataset()
{
- add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0, 0, 1, 0);
- add_config(TensorShape(873U, 513U), TensorShape(784U, 873U), TensorShape(784U, 513U), 0, 4, 3, 2, 0);
- add_config(TensorShape(697U, 872U), TensorShape(563U, 697U), TensorShape(563U, 872U), -2, 0, 1, 1, 0);
- add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13, -6, 2, 2);
- add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2, 8, 4, 3);
- add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1, -3, 3, 1);
+ add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0);
+ add_config(TensorShape(873U, 513U), TensorShape(784U, 873U), TensorShape(784U, 513U), 0, 4);
+ add_config(TensorShape(697U, 872U), TensorShape(563U, 697U), TensorShape(563U, 872U), -2, 0);
+ add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13);
+ add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2);
+ add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1);
}
};
} // namespace datasets
diff --git a/tests/datasets/SmallGEMMLowpDataset.h b/tests/datasets/SmallGEMMLowpDataset.h
index b7fe3907ad..881546e70f 100644
--- a/tests/datasets/SmallGEMMLowpDataset.h
+++ b/tests/datasets/SmallGEMMLowpDataset.h
@@ -42,12 +42,12 @@ class SmallGEMMLowpDataset final : public GEMMLowpDataset
public:
SmallGEMMLowpDataset()
{
- add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0, 0, 1, 0);
- add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4, 3, 2, 0);
- add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0, 1, 1, 0);
- add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 5, 13, -6, 2, 2);
- add_config(TensorShape(38U, 12U), TensorShape(21U, 38U), TensorShape(21U, 12U), -3, -2, 8, 4, 3);
- add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1, -3, 3, 1);
+ add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0);
+ add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4);
+ add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0);
+ add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 5, 13);
+ add_config(TensorShape(38U, 12U), TensorShape(21U, 38U), TensorShape(21U, 12U), -3, -2);
+ add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1);
}
};
} // namespace datasets
diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp
index e1d76503cd..bac3a20c8e 100644
--- a/tests/validation/CPP/GEMMLowp.cpp
+++ b/tests/validation/CPP/GEMMLowp.cpp
@@ -21,10 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "GEMM.h"
+#include "GEMMLowp.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
namespace arm_compute
{
@@ -34,17 +33,21 @@ namespace validation
{
namespace reference
{
-SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, SimpleTensor<int32_t> &c)
+template <typename T>
+SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<T> &a, const SimpleTensor<T> &b, int32_t a_offset, int32_t b_offset)
{
- ARM_COMPUTE_UNUSED(a);
- ARM_COMPUTE_UNUSED(b);
- ARM_COMPUTE_UNUSED(c);
- const int K = a.shape().x();
- const int b_width = b.shape().x();
- const int rows = c.shape().y(); //M
- const int cols = c.shape().x(); //N
+ TensorShape shape(b.shape()[0], a.shape()[1]);
+
+ SimpleTensor<int32_t> c(shape, DataType::S32);
+
+ const int K = a.shape().x();
+ const int b_width = b.shape().x();
+ const int rows = c.shape().y(); //M
+ const int cols = c.shape().x(); //N
+
std::vector<int32_t> acc;
acc.resize(cols);
+
for(int i = 0; i < rows; ++i)
{
for(int j = 0; j < cols; ++j)
@@ -53,10 +56,10 @@ SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor
}
for(int k = 0; k < K; ++k)
{
- auto tmp_a = static_cast<int32_t>(a[k + i * K]);
+ const int32_t tmp_a = a_offset + static_cast<int32_t>(a[k + i * K]);
for(int j = 0; j < b_width; ++j)
{
- auto tmp_b = static_cast<int32_t>(b[j + k * b_width]);
+ const int32_t tmp_b = b_offset + static_cast<int32_t>(b[j + k * b_width]);
const int32_t mult_as_int = tmp_a * tmp_b;
acc[j] += mult_as_int;
}
@@ -71,43 +74,21 @@ SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor
}
template <typename T>
-SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
{
- const int K = a.shape().x();
- const int b_width = b.shape().x();
- const int rows = c.shape().y(); //M
- const int cols = c.shape().x(); //N
- std::vector<int32_t> acc;
- acc.resize(cols);
- for(int i = 0; i < rows; ++i)
+ SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
+
+ for(int i = 0; i < in.num_elements(); ++i)
{
- for(int j = 0; j < cols; ++j)
- {
- acc[j] = 0;
- }
- for(int k = 0; k < K; ++k)
- {
- const int32_t tmp_a = a_offset + static_cast<int32_t>(a[k + i * K]);
- for(int j = 0; j < b_width; ++j)
- {
- const int32_t tmp_b = b_offset + static_cast<int32_t>(b[j + k * b_width]);
- const int32_t mult_as_int = tmp_a * tmp_b;
- acc[j] += mult_as_int;
- }
- }
- for(int j = 0; j < cols; ++j)
- {
- const int32_t result = ((c_offset + acc[j]) * c_mult_int) >> out_shift;
- c[j + i * cols] = static_cast<int8_t>(std::min(127, std::max(-128, result)));
- }
+ const int32_t result = ((in[i] + result_offset) * result_mult_int) >> result_shift;
+ dst[i] = static_cast<uint8_t>(std::max(0, std::min(255, result)));
}
- return c;
+ return dst;
}
-template SimpleTensor<int8_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, SimpleTensor<int8_t> &c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift);
+template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, int32_t a_offset, int32_t b_offset);
+template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift);
} // namespace reference
} // namespace validation
} // namespace test
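Condensed, the two reference routines in this file compute: stage 1 accumulates c[i][j] = sum_k (a_offset + a[i][k]) * (b_offset + b[k][j]) in S32, and stage 2 requantizes each element independently. A self-contained sketch of the stage-2 scalar step follows; the helper name is ours, not from the library:

    #include <algorithm>
    #include <cstdint>

    // Per-element requantization as in gemmlowp_quantize_down_int32_to_uint8_scale
    // above: add an offset, scale by an integer multiplier, arithmetic-shift
    // right, then saturate to the [0, 255] range of QASYMM8.
    static inline uint8_t quantize_down_scale(int32_t in, int32_t result_offset,
                                              int32_t result_mult_int, int32_t result_shift)
    {
        const int32_t result = ((in + result_offset) * result_mult_int) >> result_shift;
        return static_cast<uint8_t>(std::max(0, std::min(255, result)));
    }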
diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h
index 2f903f2fe2..c09d8f6176 100644
--- a/tests/validation/CPP/GEMMLowp.h
+++ b/tests/validation/CPP/GEMMLowp.h
@@ -35,11 +35,11 @@ namespace validation
{
namespace reference
{
-SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, SimpleTensor<int32_t> &c);
+template <typename T>
+SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<T> &a, const SimpleTensor<T> &b, int32_t a_offset, int32_t b_offset);
template <typename T>
-SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift);
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 4924f98ea6..4407eff060 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -23,12 +23,15 @@
*/
#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
+#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
@@ -45,16 +48,13 @@ namespace validation
{
namespace
{
-const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
-const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4);
-const auto data_matrix_multiply = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 16);
+const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
+const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(GEMMLowp)
-TEST_SUITE(S8)
-
TEST_SUITE(INTERLEAVE_BLOCKED)
using NEInterleaveBlocked = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
@@ -77,50 +77,95 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedTransposedFixture, frame
TEST_SUITE_END()
-using NEGEMMLowpOffsetFixture = GEMMLowpOffsetValidationFixture<Tensor, Accessor, NEGEMMLowp>;
+TEST_SUITE(MatrixMultiplyCore)
+using NEGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()), framework::dataset::make("DataType",
- DataType::S8)),
- shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
+ shape_a, shape_b, shape_c, a_offset, b_offset)
{
// Create tensors
- Tensor a = create_tensor<Tensor>(shape_a, data_type);
- Tensor b = create_tensor<Tensor>(shape_b, data_type);
- Tensor c = create_tensor<Tensor>(shape_c, data_type);
+ Tensor a = create_tensor<Tensor>(shape_a, DataType::QASYMM8);
+ Tensor b = create_tensor<Tensor>(shape_b, DataType::QASYMM8);
+ Tensor c = create_tensor<Tensor>(shape_c, DataType::S32);
+
+ a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
+ b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
- NEGEMMLowp gemmlowp;
- gemmlowp.configure(&a, &b, &c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+ NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
+ gemmlowp_mm.configure(&a, &b, &c);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpDataset(), framework::dataset::make("DataType", DataType::S8)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpOffsetFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpDataset(), framework::dataset::make("DataType", DataType::S8)))
+TEST_SUITE_END() // MatrixMultiplyCore
+
+TEST_SUITE(OutputStage)
+
+TEST_SUITE(QuantizeDownInt32ToUint8Scale)
+
+using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8Scale>;
+
+const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -4, 4) * framework::dataset::make("result_mult_int", 1, 3) * framework::dataset::make("result_shift", 2,
+ 4);
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), quantize_down_int32_to_uint8_scale_cases),
+ shape, result_offset, result_mult_int, result_shift)
+{
+ // Create tensors
+ Tensor in = create_tensor<Tensor>(shape, DataType::S32);
+ Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8);
+
+ ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
+ output_stage.configure(&in, &out, result_offset, result_mult_int, result_shift);
+
+ // Validate valid region
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(in.info()->valid_region(), valid_region);
+ validate(out.info()->valid_region(), valid_region);
+
+ // Validate padding
+ const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ validate(in.info()->padding(), padding);
+ validate(out.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
// Validate output
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END() // U8
-TEST_SUITE(S32)
-using NEGEMMLowpMatrixMultiplyFixture = GEMMLowpMatrixMultiplyValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
-FIXTURE_DATA_TEST_CASE(MatrixMultiply, NEGEMMLowpMatrixMultiplyFixture, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases))
{
// Validate output
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END()
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
+
+TEST_SUITE_END() // OutputStage
+
+TEST_SUITE_END() // GEMMLowp
+TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index fba44008ba..f9b0dbd959 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -43,36 +43,39 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType>
-class GEMMLowpOffsetValidationFixture : public framework::Fixture
+class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
{
- _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
- _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
+ _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset);
+ _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset);
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- ARM_COMPUTE_ERROR_ON(tensor.data_type() != DataType::S8);
- std::uniform_int_distribution<> distribution(0, 3);
+ // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+ std::uniform_int_distribution<> distribution(1, 254);
library->fill(tensor, distribution, i);
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
+ int32_t a_offset, int32_t b_offset)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
- TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
- TensorType c = create_tensor<TensorType>(shape_c, data_type, 1);
+ TensorType a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1);
+ TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
+
+ a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
+ b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
// Create and configure function
FunctionType gemmlowp;
- gemmlowp.configure(&a, &b, &c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+ gemmlowp.configure(&a, &b, &c);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -90,108 +93,91 @@ protected:
// Fill tensors
fill(AccessorType(a), 0);
fill(AccessorType(b), 1);
- fill(AccessorType(c), 2);
// Compute GEMM function
gemmlowp.run();
return c;
}
- SimpleTensor<int8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
+ SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
+ int32_t a_offset, int32_t b_offset)
{
// Create reference
- SimpleTensor<int8_t> a{ shape_a, data_type, 1 };
- SimpleTensor<int8_t> b{ shape_b, data_type, 1 };
- SimpleTensor<int8_t> c{ shape_c, data_type, 1 };
+ SimpleTensor<uint8_t> a{ shape_a, DataType::QASYMM8, 1 };
+ SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };
// Fill reference
fill(a, 0);
fill(b, 1);
- fill(c, 2);
- return reference::gemmlowp<int8_t>(a, b, c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+ return reference::gemmlowp_matrix_multiply_core<uint8_t>(a, b, a_offset, b_offset);
}
- TensorType _target{};
- SimpleTensor<int8_t> _reference{};
+ TensorType _target{};
+ SimpleTensor<int32_t> _reference{};
};
template <typename TensorType, typename AccessorType, typename FunctionType>
-class GEMMLowpMatrixMultiplyValidationFixture : public framework::Fixture
+class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(size_t m, size_t n, size_t k)
+ void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
{
- const TensorShape shape_a(k, m);
- const TensorShape shape_b(n, k);
- const TensorShape shape_c(n, m);
- _target = compute_target(shape_a, shape_b, shape_c);
- _reference = compute_reference(shape_a, shape_b, shape_c);
+ _target = compute_target(shape, result_offset, result_mult_int, result_shift);
+ _reference = compute_reference(shape, result_offset, result_mult_int, result_shift);
}
protected:
template <typename U>
- void fill(U &&tensor, int i, int lo, int hi)
+ void fill(U &&tensor, int i)
{
- std::uniform_int_distribution<> distribution(lo, hi);
+ std::uniform_int_distribution<> distribution(-6000, 6000);
library->fill(tensor, distribution, i);
}
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+ TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, DataType::S8, 1);
- TensorType b = create_tensor<TensorType>(shape_b, DataType::S8, 1);
- TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
+ TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
+ TensorType b = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);
// Create and configure function
- FunctionType gemmlowp;
- gemmlowp.configure(&a, &b, &c);
+ FunctionType output_stage;
+ output_stage.configure(&a, &b, result_offset, result_mult_int, result_shift);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
// Allocate tensors
a.allocator()->allocate();
b.allocator()->allocate();
- c.allocator()->allocate();
ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
// Fill tensors
- fill(AccessorType(a), 0, -128, 127);
- fill(AccessorType(b), 1, -128, 127);
- fill(AccessorType(c), 2, 0, 0);
+ fill(AccessorType(a), 0);
// Compute GEMM function
- gemmlowp.run();
- return c;
+ output_stage.run();
+ return b;
}
- SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+ SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
{
// Create reference
- SimpleTensor<int8_t> a{ shape_a, DataType::S8, 1 };
- SimpleTensor<int8_t> b{ shape_b, DataType::S8, 1 };
- SimpleTensor<int32_t> c{ shape_c, DataType::S32, 1 };
+ SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
// Fill reference
- fill(a, 0, -128, 127);
- fill(b, 1, -128, 127);
- fill(c, 2, 0, 0);
+ fill(a, 0);
- return reference::gemmlowp(a, b, c);
+ return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(a, result_offset, result_mult_int, result_shift);
}
TensorType _target{};
- SimpleTensor<int32_t> _reference{};
+ SimpleTensor<uint8_t> _reference{};
};
-
} // namespace validation
} // namespace test
} // namespace arm_compute