author     Gian Marco Iodice <gianmarco.iodice@arm.com>    2019-06-13 15:58:32 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>     2019-06-25 13:31:23 +0000
commit     bc415af5ee9517fd113e9ea0f01fdc84f9693dc4 (patch)
tree       a8ae892b71aba2e4384d7a738025cae20e8dcfbf /tests
parent     e99196cf25a24fabd66ef33764d3c532f7cddf49 (diff)
COMPMID-2406: Create a new GEMMLowpQuantizeDownInt32ToInt16ScaleKernel for NEON
Change-Id: I3f3e247728fd6dafca066e41835f0ef9442d9b7a
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1379
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp           | 114
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h  |  92
-rw-r--r--  tests/validation/reference/GEMMLowp.cpp      |  56
-rw-r--r--  tests/validation/reference/GEMMLowp.h        |   9
4 files changed, 269 insertions(+), 2 deletions(-)
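
For orientation, a minimal usage sketch of the function under test, reconstructed from the configure() calls exercised in the tests below. The include paths, tensor shapes, and the multiplier/shift values are illustrative assumptions, not part of this patch:

    // Sketch only: the configure() argument order mirrors the test code below:
    // (input, optional bias, output, fixed-point multiplier, shift, min, max).
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void run_int16_output_stage()
    {
        // S32 accumulators in, QSYMM16 values out (shapes are illustrative).
        Tensor acc, bias, out;
        acc.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::S32));
        bias.allocator()->init(TensorInfo(TensorShape(21U), 1, DataType::S32));
        out.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16));

        // Configure before allocating, as the tests do. min == max == 0
        // disables the bounded ReLU, matching the non-ReLU test cases.
        NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
        output_stage.configure(&acc, &bias, &out, 254601600, 1, 0, 0);

        acc.allocator()->allocate();
        bias.allocator()->allocate();
        out.allocator()->allocate();
        // ... fill acc and bias ...
        output_stage.run();
    }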
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index f0460b4a23..2f604c95ea 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -294,6 +294,9 @@ const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framewo
using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
+using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
+ GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
+
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
@@ -404,6 +407,117 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedP
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
+
+TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)
+
+const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
+ 2)
+ * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
+
+const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
+ 2)
+ * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });
+
+using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
+ GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+ framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
+ TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
+ TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
+ }),
+ framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
+ TensorInfo(TensorShape(21U), 1, DataType::S32),
+ TensorInfo(TensorShape(20U), 1, DataType::S32),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
+ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
+ TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
+ })),
+ framework::dataset::make("Min",{ -205,
+ -60000,
+ -180,
+ })),
+ framework::dataset::make("Max",{ 205,
+ 60000,
+ 180,
+ })),
+ framework::dataset::make("Expected", { true, false, false })),
+ a_info, b_info, output_info, min, max, expected)
+{
+ // Lock tensors
+ Status status = NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
+ &b_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(false),
+ min,
+ max);
+ ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+ quantize_down_int32_to_int16_scale_by_fixedpoint_cases),
+ shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias)
+{
+ TensorShape shape_bias(shape[0]);
+
+ // Create tensors
+ Tensor in = create_tensor<Tensor>(shape, DataType::S32);
+ Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
+ Tensor out = create_tensor<Tensor>(shape, DataType::QSYMM16);
+
+ ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
+ output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, min, max);
+
+ // Validate valid region input and output
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(in.info()->valid_region(), valid_region);
+ validate(out.info()->valid_region(), valid_region);
+
+ // Validate valid region bias
+ if(add_bias)
+ {
+ const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
+ validate(bias.info()->valid_region(), valid_region_bias);
+ }
+
+ // Validate padding
+ const PaddingSize padding(0);
+ validate(in.info()->padding(), padding);
+ validate(out.info()->padding(), padding);
+
+ if(add_bias)
+ {
+ validate(bias.info()->padding(), padding);
+ }
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+ quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+TEST_SUITE(BoundedReLu)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+ quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // BoundedReLu
+
+TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
+
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
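
A quick sanity check on the dataset constants used above. Under the gemmlowp convention (assumed here, since the patch itself does not spell it out), result_fixedpoint_multiplier is a Q0.31 value, so the effective real scale applied to each S32 accumulator is multiplier / 2^31 / 2^shift:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Constants from quantize_down_int32_to_int16_scale_by_fixedpoint_cases.
        const int32_t multiplier = 254601600;
        const int32_t shift      = 1;

        // Q0.31 multiply, then a rounding right shift by 'shift'.
        const double scale = static_cast<double>(multiplier) / (1ll << 31) / (1 << shift);
        std::printf("effective scale ~= %f\n", scale); // ~0.0593
        return 0;
    }

With the fixture below filling inputs from [-6000, 6000], the largest requantized magnitude is roughly (6000 + 6000) * 0.0593 ~= 712, comfortably inside the int16 output range.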
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index ad5acfc418..8385221c78 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -385,6 +385,98 @@ protected:
SimpleTensor<uint8_t> _reference{};
};
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
+ {
+ _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
+ _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ std::uniform_int_distribution<> distribution(-6000, 6000);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
+ {
+ TensorShape shape_bias(shape[0]);
+
+ // Create tensors
+ TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
+ TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
+ TensorType c = create_tensor<TensorType>(shape, DataType::QSYMM16, 1);
+
+ // Create and configure function
+ FunctionType output_stage;
+ output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ c.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensor
+ fill(AccessorType(a), 0);
+
+ if(add_bias)
+ {
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate bias tensor
+ b.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensor
+ fill(AccessorType(b), 1);
+ }
+
+ // Compute GEMM function
+ output_stage.run();
+ return c;
+ }
+
+ SimpleTensor<int16_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t min, int32_t max,
+ bool add_bias)
+ {
+ // Create reference
+ TensorShape shape_bias(shape[0]);
+
+ SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
+ SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
+
+ // Fill reference
+ fill(a, 0);
+
+ if(add_bias)
+ {
+ // Fill bias
+ fill(b, 1);
+
+ return reference::gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint<int32_t>(a, b, result_fixed_point_multiplier, result_shift, min, max);
+ }
+ else
+ {
+ return reference::gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint<int32_t>(a, result_fixed_point_multiplier, result_shift, min, max);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<int16_t> _reference{};
+};
+
template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
diff --git a/tests/validation/reference/GEMMLowp.cpp b/tests/validation/reference/GEMMLowp.cpp
index 9a7e409e8a..97d05327e7 100644
--- a/tests/validation/reference/GEMMLowp.cpp
+++ b/tests/validation/reference/GEMMLowp.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -95,6 +95,34 @@ void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in,
(*dst)[i] = static_cast<uint8_t>(std::max(0, std::min(255, result)));
}
}
+
+template <typename T>
+void quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<int16_t> *dst, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max)
+{
+ const int cols_in = in->shape().x();
+
+ for(int i = 0; i < in->num_elements(); ++i)
+ {
+ int32_t result = (*in)[i];
+
+ if(bias != nullptr)
+ {
+ result += (*bias)[i % cols_in];
+ }
+
+ // Fixed point multiplication
+ result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, result_fixedpoint_multiplier), result_shift);
+
+ // Bounded ReLu
+ if(min != max)
+ {
+ result = std::max(min, std::min(max, result));
+ }
+
+ (*dst)[i] = static_cast<int16_t>(std::max(-32768, std::min(32767, result)));
+ }
+}
} // namespace
template <typename T_out, typename T_in>
@@ -201,10 +229,36 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(
return dst;
}
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min,
+ int32_t max)
+{
+ SimpleTensor<int16_t> dst(in.shape(), DataType::QSYMM16);
+
+ quantize_down_int32_to_int16_scale_by_fixedpoint<T>(&in, nullptr, &dst, result_fixedpoint_multiplier, result_shift, min, max);
+
+ return dst;
+}
+
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max)
+{
+ SimpleTensor<int16_t> dst(in.shape(), DataType::QSYMM16);
+
+ quantize_down_int32_to_int16_scale_by_fixedpoint<T>(&in, &bias, &dst, result_fixedpoint_multiplier, result_shift, min, max);
+
+ return dst;
+}
+
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
+template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max);
+template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
+ int32_t result_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min,
int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, int32_t result_mult_int,
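
The reference above leans on two helpers, asymm_int_mult and asymm_rounding_divide_by_pow2, which this patch does not define; they are assumed here to follow the standard gemmlowp semantics (SaturatingRoundingDoublingHighMul and RoundingDivideByPOT). A self-contained sketch under that assumption:

    #include <cstdint>
    #include <limits>

    // Doubling high multiply: high 32 bits of 2*a*b with rounding; the one
    // overflow case (a == b == INT32_MIN) saturates to INT32_MAX.
    int32_t asymm_int_mult_sketch(int32_t a, int32_t b)
    {
        const bool    overflow = (a == b) && (a == std::numeric_limits<int32_t>::min());
        const int64_t ab       = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        const int32_t nudge    = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
        const int32_t high     = static_cast<int32_t>((ab + nudge) / (1ll << 31));
        return overflow ? std::numeric_limits<int32_t>::max() : high;
    }

    // Arithmetic right shift that rounds to nearest, ties away from zero.
    int32_t asymm_rounding_divide_by_pow2_sketch(int32_t x, int exponent)
    {
        const int32_t mask      = (int32_t(1) << exponent) - 1;
        const int32_t remainder = x & mask;
        const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
        return (x >> exponent) + (remainder > threshold ? 1 : 0);
    }

    // Example: pushing one S32 accumulator through the reference pipeline,
    // asymm_rounding_divide_by_pow2_sketch(asymm_int_mult_sketch(4000, 254601600), 1)
    // yields 237, i.e. roughly 4000 * 0.0593.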
diff --git a/tests/validation/reference/GEMMLowp.h b/tests/validation/reference/GEMMLowp.h
index 4396155b96..5581f67652 100644
--- a/tests/validation/reference/GEMMLowp.h
+++ b/tests/validation/reference/GEMMLowp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,6 +58,13 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(
template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
+
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift,
+ int32_t min, int32_t max);
+template <typename T>
+SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier,
+ int32_t result_shift, int32_t min, int32_t max);
} // namespace reference
} // namespace validation
} // namespace test