From a8a7c1d2691fc688dabc275d8b68e5a1267f00af Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Tue, 12 May 2020 22:01:23 +0100
Subject: COMPMID-3295: Static input for ActivationLayer test suite

- A member function added to AssetsLibrary to fill tensors with static values.
- ActivationLayerFixture has been modified to use the new function.
- Redundant nightly tests are removed.

Change-Id: Ib2a1103a1e438e808183170dc9d097599523c6ec
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3188
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 tests/AssetsLibrary.h                              | 68 ++++++++++++++++++---
 tests/validation/CL/ActivationLayer.cpp            | 55 ++++++-----------
 tests/validation/NEON/ActivationLayer.cpp          | 71 ++++++----------------
 tests/validation/fixtures/ActivationLayerFixture.h | 49 +++++++++++----
 4 files changed, 132 insertions(+), 111 deletions(-)

diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index 84653ed089..16ebff7ec0 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -392,6 +392,19 @@ public:
     template <typename T, typename D>
     void fill_tensor_value(T &&tensor, D value) const;
 
+    /** Fill a tensor with a given vector of static values.
+     *
+     * @param[in, out] tensor To be filled tensor.
+     * @param[in]      values A vector containing the values to fill the tensor with.
+     *
+     * To cope with tensors of various sizes, the vector doesn't have to have the
+     * same number of elements as the tensor. If the tensor is larger than the
+     * vector, the iterator wraps around and keeps iterating from the beginning.
+     * If the vector is larger, the values past the required number won't be used.
+     */
+    template <typename T, typename DataType>
+    void fill_static_values(T &&tensor, const std::vector<DataType> &values) const;
+
 private:
     // Function prototype to convert between image formats.
     using Converter = void (*)(const RawTensor &src, RawTensor &dst);
@@ -399,6 +412,9 @@ private:
     using Extractor = void (*)(const RawTensor &src, RawTensor &dst);
     // Function prototype to load an image file.
     using Loader = RawTensor (*)(const std::string &path);
+    // Function type to generate a number to fill tensors.
+    template <typename ResultType>
+    using GeneratorFunctionType = std::function<ResultType(void)>;
 
     const Converter &get_converter(Format src, Format dst) const;
     const Converter &get_converter(DataType src, Format dst) const;
@@ -443,6 +459,14 @@ private:
      */
     const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;
 
+    /** Fill a tensor with a value generator function.
+     *
+     * @param[in, out] tensor         To be filled tensor.
+     * @param[in]      generate_value A function that generates values.
+     */
+    template <typename T, typename ResultType>
+    void fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const;
+
     mutable TensorCache             _cache{};
     mutable arm_compute::Mutex      _format_lock{};
     mutable arm_compute::Mutex      _channel_lock{};
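The declarations above split tensor filling into a traversal core (fill_with_generator) and pluggable value sources. The following is a minimal standalone sketch of that pattern; a flat std::vector stands in for a tensor, and everything except the GeneratorFunctionType alias is illustrative rather than ComputeLibrary API:

    #include <functional>
    #include <iostream>
    #include <random>
    #include <vector>

    template <typename ResultType>
    using GeneratorFunctionType = std::function<ResultType(void)>;

    // Traversal core: visit every element and ask the generator for one value each.
    template <typename ResultType>
    void fill_with_generator(std::vector<ResultType> &buffer, const GeneratorFunctionType<ResultType> &generate_value)
    {
        for(auto &element : buffer)
        {
            element = generate_value();
        }
    }

    int main()
    {
        std::vector<float> buffer(8);

        // Random filling reduces to wrapping a distribution and an engine in a
        // lambda, which is what the reworked AssetsLibrary::fill() does below.
        std::mt19937                          gen(1);
        std::uniform_real_distribution<float> dist(-1.f, 1.f);
        fill_with_generator<float>(buffer, [&]() { return dist(gen); });

        for(float v : buffer)
        {
            std::cout << v << ' ';
        }
        std::cout << '\n';
    }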
@@ -556,13 +580,9 @@ void AssetsLibrary::fill(std::vector<T> &vec, D &&distribution, std::random_devi
     }
 }
 
-template <typename T, typename D>
-void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
+template <typename T, typename ResultType>
+void AssetsLibrary::fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const
 {
-    using ResultType = typename std::remove_reference<D>::type::result_type;
-
-    std::mt19937 gen(_seed + seed_offset);
-
     const bool  is_nhwc = tensor.data_layout() == DataLayout::NHWC;
     TensorShape shape(tensor.shape());
 
@@ -587,16 +607,50 @@ void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::resul
         // Iterate over all channels
         for(int channel = 0; channel < tensor.num_channels(); ++channel)
         {
-            const ResultType value        = distribution(gen);
+            const ResultType value        = generate_value();
             ResultType      &target_value = reinterpret_cast<ResultType *>(tensor(id))[channel];
 
             store_value_with_data_type(&target_value, value, tensor.data_type());
         }
     }
+}
+
+template <typename T, typename D>
+void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
+{
+    using ResultType = typename std::remove_reference<D>::type::result_type;
+    std::mt19937 gen(_seed + seed_offset);
+
+    GeneratorFunctionType<ResultType> number_generator = [&]()
+    {
+        const ResultType value = distribution(gen);
+        return value;
+    };
+
+    fill_with_generator(tensor, number_generator);
 
     fill_borders_with_garbage(tensor, distribution, seed_offset);
 }
 
+template <typename T, typename DataType>
+void AssetsLibrary::fill_static_values(T &&tensor, const std::vector<DataType> &values) const
+{
+    auto it = values.begin();
+
+    GeneratorFunctionType<DataType> get_next_value = [&]()
+    {
+        const DataType value = *it;
+        ++it;
+
+        if(it == values.end())
+        {
+            it = values.begin();
+        }
+
+        return value;
+    };
+
+    fill_with_generator(tensor, get_next_value);
+}
+
 template <typename D>
 void AssetsLibrary::fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const
 {
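The wrap-around contract documented for fill_static_values() is easiest to see with concrete numbers. A minimal sketch, again with a flat std::vector standing in for a tensor; note that, like the generator lambda above, it assumes a non-empty value vector:

    #include <iostream>
    #include <vector>

    template <typename DataType>
    void fill_static_values(std::vector<DataType> &buffer, const std::vector<DataType> &values)
    {
        auto it = values.begin();
        for(auto &element : buffer)
        {
            element = *it;
            if(++it == values.end())
            {
                it = values.begin(); // wrap around once the value vector is exhausted
            }
        }
    }

    int main()
    {
        std::vector<int> buffer(7);
        fill_static_values(buffer, { 10, 20, 30 });

        for(int v : buffer)
        {
            std::cout << v << ' '; // prints: 10 20 30 10 20 30 10
        }
        std::cout << '\n';
    }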
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 9ebbedc3c4..b32e3791f5 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -188,32 +188,19 @@ using CLActivationLayerFixture = ActivationValidationFixture<CLTensor, CLAccesso
 
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset),
-                                                            framework::dataset::make("DataType",
-                                                                                     DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
+                                                      framework::dataset::make("DataType",
+                                                                               DataType::F16)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
 }
 /** [CLActivationLayer Test snippet] **/
-FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
-                                                          framework::dataset::make("DataType",
-                                                                                   DataType::F16)))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
 TEST_SUITE_END() // FP16
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
-                                                             DataType::F32)))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset), framework::dataset::make("DataType",
-                                                           DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
+                                                       DataType::F32)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
@@ -234,38 +221,30 @@ const auto QuantizedActivationDataset16 = combine(combine(framework::dataset::ma
 
 TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
-                                                                framework::dataset::make("DataType",
-                                                                                         DataType::QASYMM8)),
-                                                                framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
+                                                          framework::dataset::make("DataType",
+                                                                                   DataType::QASYMM8)),
+                                                          framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
 }
 TEST_SUITE_END() // QASYMM8
 TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
-                                                               framework::dataset::make("DataType",
-                                                                                        DataType::QASYMM8_SIGNED)),
-                                                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 10.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
+                                                         framework::dataset::make("DataType",
+                                                                                  DataType::QASYMM8_SIGNED)),
+                                                         framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 10.0f) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
 }
 TEST_SUITE_END() // QASYMM8_SIGNED
 TEST_SUITE(QSYMM16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset16),
-                                                                framework::dataset::make("DataType",
-                                                                                         DataType::QSYMM16)),
-                                                                framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_qsymm16);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset16),
-                                                              framework::dataset::make("DataType",
-                                                                                       DataType::QSYMM16)),
-                                                              framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset16),
+                                                          framework::dataset::make("DataType",
+                                                                                   DataType::QSYMM16)),
+                                                          framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qsymm16);
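Both backend test files receive the same mechanical change: each RunSmall case moves from DatasetMode::PRECOMMIT to DatasetMode::ALL, and the matching RunLarge NIGHTLY case is deleted as redundant. Nightly coverage is preserved because the framework selects cases by mode; the sketch below shows the assumed relationship between the modes (the real enumerators live in the validation framework headers and are not reproduced verbatim here):

    // Assumption: ALL is the union of the two pipeline selections, so a single
    // RunSmall case tagged ALL executes in both precommit and nightly runs.
    enum class DatasetMode : unsigned int
    {
        DISABLED  = 0,
        PRECOMMIT = 1,
        NIGHTLY   = 2,
        ALL       = PRECOMMIT | NIGHTLY
    };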
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index de8003434f..063bfaa2cd 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -198,16 +198,9 @@ using NEActivationLayerFixture = ActivationValidationFixture<Tensor, Accessor, N
 
 TEST_SUITE(Float)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset),
-                                                            framework::dataset::make("DataType",
-                                                                                     DataType::F16)))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
-                                                          framework::dataset::make("DataType",
-                                                                                   DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
+                                                      framework::dataset::make("DataType",
+                                                                               DataType::F16)))
 {
     // Validate output
     validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
@@ -216,19 +209,13 @@
 TEST_SUITE_END()
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
-                                                             DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType",
+                                                       DataType::F32)))
 {
     // Validate output
     validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
-                                                           framework::dataset::make("DataType", DataType::F32)))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
-}
 TEST_SUITE_END() // FP32
 TEST_SUITE_END() // Float
@@ -249,18 +236,10 @@ const auto QuantizedActivationDataset = combine(combine(framework::dataset::make
 
 TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
-                                                                framework::dataset::make("DataType",
-                                                                                         DataType::QASYMM8)),
-                                                                framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
-                                                              framework::dataset::make("DataType",
-                                                                                       DataType::QASYMM8)),
-                                                              framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+                                                          framework::dataset::make("DataType",
+                                                                                   DataType::QASYMM8)),
+                                                          framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -268,18 +247,10 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, fra
 TEST_SUITE_END() // QASYMM8
 
 TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
-                                                               framework::dataset::make("DataType",
-                                                                                        DataType::QASYMM8_SIGNED)),
-                                                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
-                                                             framework::dataset::make("DataType",
-                                                                                      DataType::QASYMM8_SIGNED)),
-                                                             framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+                                                         framework::dataset::make("DataType",
+                                                                                  DataType::QASYMM8_SIGNED)),
+                                                         framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -294,18 +265,10 @@ const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset:
 framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
 
 TEST_SUITE(QSYMM16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), Int16QuantizedActivationDataset),
-                                                                framework::dataset::make("DataType",
-                                                                                         DataType::QSYMM16)),
-                                                                framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qsymm16);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), Int16QuantizedActivationDataset),
-                                                              framework::dataset::make("DataType",
-                                                                                       DataType::QSYMM16)),
-                                                              framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), Int16QuantizedActivationDataset),
+                                                          framework::dataset::make("DataType",
+                                                                                   DataType::QSYMM16)),
+                                                          framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qsymm16);
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 3294986519..551ee2d8cf 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -68,6 +68,37 @@ public:
     }
 
 protected:
+    std::vector<T> get_boundary_values(T min, T max)
+    {
+        // This function returns a vector filled with the following values, which
+        // represent two partitions derived from equivalence partitioning:
+        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
+        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
+        const auto delta         = is_data_type_float(_data_type) ? T(0.1f) : T(1);
+        const auto center_value  = (min + max) / 2;
+        const auto lower_quarter = (min + center_value) / 2;
+        const auto upper_quarter = (center_value + max) / 2;
+
+        std::vector<T> boundary_values{};
+
+        // To ensure all the inserted values are within the given range after subtracting/adding delta
+        auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
+        {
+            for(auto &v : new_values)
+            {
+                if(v >= min && v <= max)
+                {
+                    boundary_values.emplace_back(v);
+                }
+            }
+        };
+
+        insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) }); // lower partition
+        insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition
+
+        return boundary_values;
+    }
+
     template <typename U>
     void fill(U &&tensor)
     {
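To make the two partitions concrete, here is a standalone transcription of get_boundary_values() with delta passed in explicitly (in the fixture it is derived from _data_type, which is the only deviation). For an unsigned 8-bit range it yields nine values:

    #include <initializer_list>
    #include <iostream>
    #include <vector>

    template <typename T>
    std::vector<T> get_boundary_values(T min, T max, T delta)
    {
        const T center_value  = (min + max) / 2;
        const T lower_quarter = (min + center_value) / 2;
        const T upper_quarter = (center_value + max) / 2;

        std::vector<T> boundary_values{};
        auto insert_values = [&](const std::initializer_list<T> &new_values)
        {
            for(const T &v : new_values)
            {
                if(v >= min && v <= max) // keep only values that are still inside the range
                {
                    boundary_values.emplace_back(v);
                }
            }
        };

        insert_values({ min, static_cast<T>(min + delta), lower_quarter, static_cast<T>(center_value - delta) });               // lower partition
        insert_values({ center_value, static_cast<T>(center_value + delta), upper_quarter, static_cast<T>(max - delta), max }); // upper partition
        return boundary_values;
    }

    int main()
    {
        // QASYMM8-like range [0, 255] with delta = 1:
        // center = 127, lower quarter = 63, upper quarter = 191
        for(int v : get_boundary_values<int>(0, 255, 1))
        {
            std::cout << v << ' '; // prints: 0 1 63 126 127 128 191 254 255
        }
        std::cout << '\n';
    }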
@@ -76,20 +107,14 @@
             float min_bound = 0;
             float max_bound = 0;
             std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
-            std::uniform_real_distribution<> distribution(min_bound, max_bound);
-            library->fill(tensor, distribution, 0);
-        }
-        else if(is_data_type_quantized(tensor.data_type()))
-        {
-            library->fill_tensor_uniform(tensor, 0);
+            library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
         }
         else
         {
-            int min_bound = 0;
-            int max_bound = 0;
-            std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
-            std::uniform_int_distribution<> distribution(min_bound, max_bound);
-            library->fill(tensor, distribution, 0);
+            PixelValue min{};
+            PixelValue max{};
+            std::tie(min, max) = get_min_max(tensor.data_type());
+            library->fill_static_values(tensor, get_boundary_values(min.get<T>(), max.get<T>()));
         }
     }
-- 
cgit v1.2.1