author    Sang-Hoon Park <sang-hoon.park@arm.com>  2020-05-12 22:01:23 +0100
committer Sang-Hoon Park <sang-hoon.park@arm.com>  2020-05-14 10:41:01 +0000
commit    a8a7c1d2691fc688dabc275d8b68e5a1267f00af (patch)
tree      54bd44259d715df0fec12f0c98614646b902a1a0
parent    1a378107af40669eaa23a12e064bb8fabff2473e (diff)
download  ComputeLibrary-a8a7c1d2691fc688dabc275d8b68e5a1267f00af.tar.gz
COMPMID-3295: Static input for ActivationLayer test suite
- A member function added to AssetsLibrary to fill tensors with static values.
- ActivationLayerFixture has been modified to use the new function.
- Redundant nightly tests are removed.

Change-Id: Ib2a1103a1e438e808183170dc9d097599523c6ec
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3188
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  tests/AssetsLibrary.h                               68
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp             55
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp           71
-rw-r--r--  tests/validation/fixtures/ActivationLayerFixture.h  49
4 files changed, 132 insertions, 111 deletions
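The headline change is a new AssetsLibrary::fill_static_values() member that fills a tensor by cycling through a caller-supplied vector. A minimal usage sketch, assuming the test-framework global `library` and an already-allocated `tensor` (both illustrative stand-ins here; the real call site appears in ActivationLayerFixture.h below):

    // Hypothetical call site; mirrors how the fixture below uses the new API.
    const std::vector<float> values{ -1.f, -0.5f, 0.f, 0.5f, 1.f };
    library->fill_static_values(tensor, values); // the 5 values repeat across the whole tensor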
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index 84653ed089..16ebff7ec0 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -392,6 +392,19 @@ public:
template <typename T, typename D>
void fill_tensor_value(T &&tensor, D value) const;
+ /** Fill a tensor with static values taken from a given vector.
+ *
+ * @param[in, out] tensor To be filled tensor.
+ * @param[in] values A vector containing the values.
+ *
+ * To cope with tensors of various sizes, the vector size doesn't have to
+ * match the tensor's size. If the tensor is larger than the vector, the
+ * iterator over the vector will keep iterating and wrap around. If the
+ * vector is larger, the values located after the required size won't be used.
+ */
+ template <typename T, typename DataType>
+ void fill_static_values(T &&tensor, const std::vector<DataType> &values) const;
+
private:
// Function prototype to convert between image formats.
using Converter = void (*)(const RawTensor &src, RawTensor &dst);
@@ -399,6 +412,9 @@ private:
using Extractor = void (*)(const RawTensor &src, RawTensor &dst);
// Function prototype to load an image file.
using Loader = RawTensor (*)(const std::string &path);
+ // Function type to generate a number to fill tensors.
+ template <typename ResultType>
+ using GeneratorFunctionType = std::function<ResultType(void)>;
const Converter &get_converter(Format src, Format dst) const;
const Converter &get_converter(DataType src, Format dst) const;
@@ -443,6 +459,14 @@ private:
*/
const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;
+ /** Fill a tensor with a value generator function.
+ *
+ * @param[in, out] tensor To be filled tensor.
+ * @param[in] generate_value A function that generates values.
+ */
+ template <typename T, typename ResultType>
+ void fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const;
+
mutable TensorCache _cache{};
mutable arm_compute::Mutex _format_lock{};
mutable arm_compute::Mutex _channel_lock{};
@@ -556,13 +580,9 @@ void AssetsLibrary::fill(std::vector<T> &vec, D &&distribution, std::random_devi
}
}
-template <typename T, typename D>
-void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
+template <typename T, typename ResultType>
+void AssetsLibrary::fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const
{
- using ResultType = typename std::remove_reference<D>::type::result_type;
-
- std::mt19937 gen(_seed + seed_offset);
-
const bool is_nhwc = tensor.data_layout() == DataLayout::NHWC;
TensorShape shape(tensor.shape());
@@ -587,16 +607,50 @@ void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::resul
// Iterate over all channels
for(int channel = 0; channel < tensor.num_channels(); ++channel)
{
- const ResultType value = distribution(gen);
+ const ResultType value = generate_value();
ResultType &target_value = reinterpret_cast<ResultType *>(tensor(id))[channel];
store_value_with_data_type(&target_value, value, tensor.data_type());
}
}
+}
+template <typename T, typename D>
+void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
+{
+ using ResultType = typename std::remove_reference<D>::type::result_type;
+ std::mt19937 gen(_seed + seed_offset);
+
+ GeneratorFunctionType<ResultType> number_generator = [&]()
+ {
+ const ResultType value = distribution(gen);
+ return value;
+ };
+
+ fill_with_generator(tensor, number_generator);
fill_borders_with_garbage(tensor, distribution, seed_offset);
}
+template <typename T, typename DataType>
+void AssetsLibrary::fill_static_values(T &&tensor, const std::vector<DataType> &values) const
+{
+ auto it = values.begin();
+ GeneratorFunctionType<DataType> get_next_value = [&]()
+ {
+ const DataType value = *it;
+ ++it;
+
+ if(it == values.end())
+ {
+ it = values.begin();
+ }
+
+ return value;
+ };
+
+ fill_with_generator(tensor, get_next_value);
+}
+
template <typename D>
void AssetsLibrary::fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const
{
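The wrap-around behaviour documented for fill_static_values() is easy to verify in isolation. Below is a self-contained sketch of the same round-robin iteration, independent of AssetsLibrary; all names are illustrative, and like the library code it assumes the value vector is non-empty:

    #include <functional>
    #include <iostream>
    #include <vector>

    int main()
    {
        const std::vector<int> values{ 1, 2, 3 };
        auto it = values.begin();

        // Same shape as the generator built by fill_static_values(): return
        // the current element, advance, and wrap back to the start at the end.
        std::function<int(void)> get_next_value = [&]()
        {
            const int value = *it;
            if(++it == values.end())
            {
                it = values.begin();
            }
            return value;
        };

        // Filling a 7-element "tensor" from 3 static values:
        for(int i = 0; i < 7; ++i)
        {
            std::cout << get_next_value() << ' '; // prints: 1 2 3 1 2 3 1
        }
        std::cout << '\n';
        return 0;
    }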
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 9ebbedc3c4..b32e3791f5 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -188,32 +188,19 @@ using CLActivationLayerFixture = ActivationValidationFixture<CLTensor, CLAccesso
TEST_SUITE(Float)
TEST_SUITE(FP16)
/** [CLActivationLayer Test snippet] **/
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::F16)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
/** [CLActivationLayer Test snippet] **/
-FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::F16)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
TEST_SUITE_END() // FP16
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
- DataType::F32)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
+ DataType::F32)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
@@ -234,38 +221,30 @@ const auto QuantizedActivationDataset16 = combine(combine(framework::dataset::ma
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
- framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
- framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 10.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 10.0f) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset16),
- framework::dataset::make("DataType",
- DataType::QSYMM16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qsymm16);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset16),
- framework::dataset::make("DataType",
- DataType::QSYMM16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset16),
+ framework::dataset::make("DataType",
+ DataType::QSYMM16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qsymm16);
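The DatasetMode::PRECOMMIT to DatasetMode::ALL switch above (repeated in the NEON diff below) is what makes the RunLarge/NIGHTLY cases removable: with static boundary values, large shapes add no input coverage, and a case tagged ALL already runs in both the precommit and nightly suites. A sketch of that relationship, assuming the framework models modes as a bitmask; the real definition lives in the test framework (tests/framework/DatasetModes.h) and may differ in detail:

    // Illustrative only; treat the exact enumerator values as an assumption.
    enum class DatasetMode : unsigned int
    {
        DISABLED  = 0,
        PRECOMMIT = 1,
        NIGHTLY   = 2,
        ALL       = PRECOMMIT | NIGHTLY // selected by both precommit and nightly filters
    };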
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index de8003434f..063bfaa2cd 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -198,16 +198,9 @@ using NEActivationLayerFixture = ActivationValidationFixture<Tensor, Accessor, N
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::F16)))
-{
- // Validate output
- validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::F16)))
{
// Validate output
validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
@@ -216,19 +209,13 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
+ DataType::F32)))
{
// Validate output
validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
- framework::dataset::make("DataType", DataType::F32)))
-{
- // Validate output
- validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
-}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
@@ -249,18 +236,10 @@ const auto QuantizedActivationDataset = combine(combine(framework::dataset::make
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
- framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qasymm8);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
- framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -268,18 +247,10 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, fra
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
- framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qasymm8);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
- framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -294,18 +265,10 @@ const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset:
framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
TEST_SUITE(QSYMM16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), Int16QuantizedActivationDataset),
- framework::dataset::make("DataType",
- DataType::QSYMM16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qsymm16);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), Int16QuantizedActivationDataset),
- framework::dataset::make("DataType",
- DataType::QSYMM16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), Int16QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QSYMM16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qsymm16);
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 3294986519..551ee2d8cf 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -68,6 +68,37 @@ public:
}
protected:
+ std::vector<T> get_boundary_values(T min, T max)
+ {
+ // This function will return a vector filled with the following values that
+ // represent the two partitions derived from equivalence partitioning:
+ // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
+ // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
+ const auto delta = is_data_type_float(_data_type) ? T(0.1f) : T(1);
+ const auto center_value = (min + max) / 2;
+ const auto lower_quarter = (min + center_value) / 2;
+ const auto upper_quarter = (center_value + max) / 2;
+
+ std::vector<T> boundary_values{};
+
+ // To ensure all the inserted values are within the given range after subtracting/adding delta
+ auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
+ {
+ for(auto &v : new_values)
+ {
+ if(v >= min && v <= max)
+ {
+ boundary_values.emplace_back(v);
+ }
+ }
+ };
+
+ insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) }); // lower partition
+ insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition
+
+ return boundary_values;
+ }
+
template <typename U>
void fill(U &&tensor)
{
@@ -76,20 +107,14 @@ protected:
float min_bound = 0;
float max_bound = 0;
std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
- std::uniform_real_distribution<> distribution(min_bound, max_bound);
- library->fill(tensor, distribution, 0);
- }
- else if(is_data_type_quantized(tensor.data_type()))
- {
- library->fill_tensor_uniform(tensor, 0);
+ library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
}
else
{
- int min_bound = 0;
- int max_bound = 0;
- std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
- std::uniform_int_distribution<> distribution(min_bound, max_bound);
- library->fill(tensor, distribution, 0);
+ PixelValue min{};
+ PixelValue max{};
+ std::tie(min, max) = get_min_max(tensor.data_type());
+ library->fill_static_values(tensor, get_boundary_values(min.get<T>(), max.get<T>()));
}
}
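As a concrete check of get_boundary_values(): for float data with min = -1 and max = 1, delta is 0.1, the center is 0, and the quarters are -0.5 and 0.5, so the tensor is filled (round-robin) with { -1, -0.9, -0.5, -0.1, 0, 0.1, 0.5, 0.9, 1 }. A standalone sketch reproducing that computation (names are illustrative):

    #include <iostream>
    #include <vector>

    int main()
    {
        // Float path of get_boundary_values() for the range [-1, 1].
        const float min = -1.f, max = 1.f;
        const float delta         = 0.1f;
        const float center_value  = (min + max) / 2;          //  0.0
        const float lower_quarter = (min + center_value) / 2; // -0.5
        const float upper_quarter = (center_value + max) / 2; //  0.5

        // All nine candidates fall inside [min, max], so the range filter in
        // the fixture's insert_values lambda would keep every one of them.
        const std::vector<float> boundary_values{
            min, min + delta, lower_quarter, center_value - delta,              // lower partition
            center_value, center_value + delta, upper_quarter, max - delta, max // upper partition
        };

        for(const float v : boundary_values)
        {
            std::cout << v << ' '; // -1 -0.9 -0.5 -0.1 0 0.1 0.5 0.9 1
        }
        std::cout << '\n';
        return 0;
    }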