25 files changed, 486 insertions, 168 deletions
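This patch moves the NEON validation suite's fp16 gating from the compiler-defined __ARM_FEATURE_FP16_VECTOR_ARITHMETIC macro to the library's ARM_COMPUTE_ENABLE_FP16 build option, and pairs the compile-time guard with a runtime CPUInfo::get().has_fp16() check. Tests that are compiled in but run on a core without fp16 vector support are now reported as skipped instead of failing. Distilled from the hunks below, the guard added to every fp16 test case has this shape (tolerance_f16 stands in for whichever tolerance the suite uses):

    if(CPUInfo::get().has_fp16())
    {
        // Device supports fp16 vector arithmetic: compare target against reference as before.
        validate(Accessor(_target), _reference, tolerance_f16);
    }
    else
    {
        // The fixture skipped computation on this device, so record the skip.
        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
        framework::ARM_COMPUTE_PRINT_INFO();
    }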
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index bab7490762..fac5d73abd 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -125,50 +125,90 @@ FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, NEQuantizationLayerQASYMM16Fixture<floa
     validate(Accessor(_target), _reference, tolerance_u16);
 }
 TEST_SUITE_END() // FP32
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_u8);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_u8);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8Signed, NEQuantizationLayerQASYMM8SignedFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(QuantizationSmallShapes,
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_s8);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_s8);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, NEQuantizationLayerQASYMM16Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_u16);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_u16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_u8);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_u8);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, NEQuantizationLayerQASYMM16Fixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_u16);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_u16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 TEST_SUITE_END() // FP16
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE_END() // Float
 TEST_SUITE(Quantized)
diff --git a/tests/validation/NEON/RNNLayer.cpp b/tests/validation/NEON/RNNLayer.cpp
index 979aa0f2c5..d6e4b7ac0e 100644
--- a/tests/validation/NEON/RNNLayer.cpp
+++ b/tests/validation/NEON/RNNLayer.cpp
@@ -40,10 +40,10 @@ namespace validation
 namespace
 {
 RelativeTolerance<float> tolerance_f32(0.001f); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType:F32 */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 RelativeTolerance<half> tolerance_f16(half(0.1)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType:F16 */
 constexpr float         abs_tolerance_f16(0.02f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType:F16 */
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
 } // namespace
 TEST_SUITE(NEON)
@@ -134,15 +134,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NERNNLayerFixture<float>, framework::DatasetMod
 }
 TEST_SUITE_END() // FP32
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NERNNLayerFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallRNNLayerDataset(), framework::dataset::make("DataType", DataType::F16)))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_f16, 0.02f, abs_tolerance_f16);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_f16, 0.02f, abs_tolerance_f16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 TEST_SUITE_END() // FP16
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
 TEST_SUITE_END() // RNNLayer
 TEST_SUITE_END() // Neon
 } // namespace validation
diff --git a/tests/validation/NEON/ROIAlignLayer.cpp b/tests/validation/NEON/ROIAlignLayer.cpp
index 98c92a0b20..1f3db04ffd 100644
--- a/tests/validation/NEON/ROIAlignLayer.cpp
+++ b/tests/validation/NEON/ROIAlignLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,10 +47,10 @@ namespace
 RelativeTolerance<float> relative_tolerance_f32(0.01f);
 AbsoluteTolerance<float> absolute_tolerance_f32(0.001f);
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 RelativeTolerance<float> relative_tolerance_f16(0.01f);
 AbsoluteTolerance<float> absolute_tolerance_f16(0.001f);
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
 constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
 constexpr AbsoluteTolerance<int8_t>  tolerance_qasymm8_s(1);
@@ -115,17 +115,25 @@ FIXTURE_DATA_TEST_CASE(SmallROIAlignLayerFloat, NEROIAlignLayerFloatFixture, fra
     // Validate output
     validate(Accessor(_target), _reference, relative_tolerance_f32, .02f, absolute_tolerance_f32);
 }
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 using NEROIAlignLayerHalfFixture = ROIAlignLayerFixture<Tensor, Accessor, NEROIAlignLayer, half, half>;
 FIXTURE_DATA_TEST_CASE(SmallROIAlignLayerHalf, NEROIAlignLayerHalfFixture, framework::DatasetMode::ALL,
                        framework::dataset::combine(framework::dataset::combine(datasets::SmallROIDataset(),
                                                                                framework::dataset::make("DataType", { DataType::F16 })),
                                                    framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, relative_tolerance_f16, .02f, absolute_tolerance_f16);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, relative_tolerance_f16, .02f, absolute_tolerance_f16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE_END() // Float
diff --git a/tests/validation/NEON/Range.cpp b/tests/validation/NEON/Range.cpp
index fda7b2c448..0df5e86186 100644
--- a/tests/validation/NEON/Range.cpp
+++ b/tests/validation/NEON/Range.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -144,7 +144,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<int16_t>, framework::DatasetMode
 TEST_SUITE_END() // S16
 TEST_SUITE(Float)
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
                            framework::dataset::make("DataType", DataType::F16),
@@ -152,11 +152,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<half>, framework::DatasetMode::P
                            float_step_dataset),
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo() })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance, 0.f, abs_tolerance);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance, 0.f, abs_tolerance);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 TEST_SUITE_END() // FP16
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
diff --git a/tests/validation/NEON/ReduceMean.cpp b/tests/validation/NEON/ReduceMean.cpp
index 8ca0bb53a7..e5692693bd 100644
--- a/tests/validation/NEON/ReduceMean.cpp
+++ b/tests/validation/NEON/ReduceMean.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -43,9 +43,9 @@ namespace
 {
 constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
 #ifdef __aarch64__
 constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1); /**< Tolerance value for comparing reference's output against implementation's output for unsigned 8-bit asymmetric quantized type */
 constexpr AbsoluteTolerance<int8_t>  tolerance_s8(1); /**< Tolerance value for comparing reference's output against implementation's output for signed 8-bit asymmetric quantized type */
@@ -93,15 +93,23 @@ using NEReduceMeanFixture = ReduceMeanFixture<Tensor, Accessor, NEReduceMean, T>
 TEST_SUITE(Float)
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEReduceMeanFixture<half>,
                        framework::DatasetMode::PRECOMMIT,
                        combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_f16);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_f16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunLarge,
@@ -109,11 +117,19 @@ FIXTURE_DATA_TEST_CASE(RunLarge,
                        framework::DatasetMode::NIGHTLY,
                        combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
 {
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_f16);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_f16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 TEST_SUITE_END() // FP16
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEReduceMeanFixture<float>,
diff --git a/tests/validation/NEON/Reverse.cpp b/tests/validation/NEON/Reverse.cpp
index 7b5337f14b..7d99bd614d 100644
--- a/tests/validation/NEON/Reverse.cpp
+++ b/tests/validation/NEON/Reverse.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -94,7 +94,7 @@ using NEReverseFixture = ReverseValidationFixture<Tensor, Accessor, NEReverse, T
 TEST_SUITE(Float)
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(F16)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEReverseFixture<half>,
@@ -105,8 +105,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
                        make("use_negative_axis", { true, false }),
                        make("use_inverted_axis", { true, false })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunLarge,
@@ -118,11 +126,19 @@ FIXTURE_DATA_TEST_CASE(RunLarge,
                        make("use_negative_axis", { true, false }),
                        make("use_inverted_axis", { true, false })))
 {
-    // Validate output
-    validate(Accessor(_target), _reference);
+    if(CPUInfo::get().has_fp16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 TEST_SUITE_END() // F16
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall,
diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp
index f1209a21ac..55de2d6281 100644
--- a/tests/validation/NEON/Scale.cpp
+++ b/tests/validation/NEON/Scale.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -79,10 +79,10 @@ constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1);
 constexpr AbsoluteTolerance<int8_t>  tolerance_s8(1);
 constexpr AbsoluteTolerance<int16_t> tolerance_s16(1);
 RelativeTolerance<float>             tolerance_f32(0.05);
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 constexpr float         abs_tolerance_f16(0.01f);
 RelativeTolerance<half> tolerance_f16(half(0.1));
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
 constexpr float tolerance_num_s16 = 0.01f;
 constexpr float tolerance_num_f32 = 0.01f;
@@ -153,9 +153,9 @@ TEST_CASE(SupportDataType, framework::DatasetMode::ALL)
         { DataType::U64, false },
         { DataType::S64, false },
         { DataType::BFLOAT16, false },
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
         { DataType::F16, true },
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
         { DataType::F32, true },
         { DataType::F64, false },
         { DataType::SIZET, false },
@@ -381,57 +381,97 @@ FIXTURE_DATA_TEST_CASE(RunMediumAlignCornersNHWC, NEScaleFixture<float>, framewo
     validate(Accessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32);
 }
 TEST_SUITE_END() // FP32
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
 TEST_SUITE(FP16)
 const auto f16_shape      = combine((SCALE_SHAPE_DATASET(num_elements_per_vector<half>())), framework::dataset::make("DataType", DataType::F16));
 const auto f16_shape_nhwc = combine(datasets::Small3DShapes(), framework::dataset::make("DataType", DataType::F16));
 FIXTURE_DATA_TEST_CASE(RunSmall, NEScaleFixture<half>, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f16_shape, ScaleSamplingPolicySet))
 {
-    //Create valid region
-    TensorInfo        src_info(_shape, 1, _data_type);
-    const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
+    if(CPUInfo::get().has_fp16())
+    {
+        //Create valid region
+        TensorInfo        src_info(_shape, 1, _data_type);
+        const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
-    // Validate output
-    validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+        // Validate output
+        validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunSmallAlignCorners, NEScaleFixture<half>, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f16_shape, ScaleAlignCornersSamplingPolicySet))
 {
-    //Create valid region
-    TensorInfo        src_info(_shape, 1, _data_type);
-    const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
+    if(CPUInfo::get().has_fp16())
+    {
+        //Create valid region
+        TensorInfo        src_info(_shape, 1, _data_type);
+        const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
-    // Validate output
-    validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+        // Validate output
+        validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
+        framework::ARM_COMPUTE_PRINT_INFO();
+    }
 }
 FIXTURE_DATA_TEST_CASE(RunMediumNHWC, NEScaleFixture<half>, framework::DatasetMode::ALL, ASSEMBLE_NHWC_DATASET(f16_shape_nhwc, ScaleSamplingPolicySet))
 {
-    //Create valid region
-    TensorInfo  src_info(_shape, 1, _data_type);
-    ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
+    if(CPUInfo::get().has_fp16())
+    {
+        //Create valid region
+        TensorInfo  src_info(_shape, 1, _data_type);
+        ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
-    // Validate output
-    validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+        // Validate output
+        validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+    }
+    else
+    {
+        ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED.");
Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunMediumMixedDataLayoutNHWC, NEScaleMixedDataLayoutFixture<half>, framework::DatasetMode::PRECOMMIT, ASSEMBLE_NHWC_DATASET(f16_shape_nhwc, ScaleSamplingPolicySet)) { - //Create valid region - TensorInfo src_info(_shape, 1, _data_type); - ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED)); + if(CPUInfo::get().has_fp16()) + { + //Create valid region + TensorInfo src_info(_shape, 1, _data_type); + ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED)); - // Validate output - validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16); + // Validate output + validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunMediumAlignCornersNHWC, NEScaleFixture<half>, framework::DatasetMode::ALL, ASSEMBLE_NHWC_DATASET(f16_shape_nhwc, ScaleAlignCornersSamplingPolicySet)) { - //Create valid region - TensorInfo src_info(_shape, 1, _data_type); - ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED)); + if(CPUInfo::get().has_fp16()) + { + //Create valid region + TensorInfo src_info(_shape, 1, _data_type); + ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED)); - // Validate output - validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16); + // Validate output + validate(Accessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } TEST_SUITE_END() // FP16 -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ TEST_SUITE_END() // Float TEST_SUITE(Integer) diff --git a/tests/validation/NEON/Select.cpp b/tests/validation/NEON/Select.cpp index 40744581b0..25d510aa64 100644 --- a/tests/validation/NEON/Select.cpp +++ b/tests/validation/NEON/Select.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -96,15 +96,22 @@ using NESelectFixture = SelectValidationFixture<Tensor, Accessor, NESelect, T>; TEST_SUITE(Float) -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#ifdef ARM_COMPUTE_ENABLE_FP16 TEST_SUITE(F16) FIXTURE_DATA_TEST_CASE(RunSmall, NESelectFixture<half>, framework::DatasetMode::PRECOMMIT, combine(run_small_dataset, framework::dataset::make("DataType", DataType::F16))) { - // Validate output - validate(Accessor(_target), _reference); + if(CPUInfo::get().has_fp16()) + { + validate(Accessor(_target), _reference); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. 
Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunLarge, @@ -112,11 +119,18 @@ FIXTURE_DATA_TEST_CASE(RunLarge, framework::DatasetMode::NIGHTLY, combine(run_large_dataset, framework::dataset::make("DataType", DataType::F16))) { - // Validate output - validate(Accessor(_target), _reference); + if(CPUInfo::get().has_fp16()) + { + validate(Accessor(_target), _reference); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } TEST_SUITE_END() // F16 -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, diff --git a/tests/validation/NEON/Slice.cpp b/tests/validation/NEON/Slice.cpp index d5549c8cdb..2ec6d09134 100644 --- a/tests/validation/NEON/Slice.cpp +++ b/tests/validation/NEON/Slice.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -67,15 +67,22 @@ template <typename T> using NESliceFixture = SliceFixture<Tensor, Accessor, NESlice, T>; TEST_SUITE(Float) -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#ifdef ARM_COMPUTE_ENABLE_FP16 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, NESliceFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallSliceDataset(), framework::dataset::make("DataType", DataType::F16))) { - // Validate output - validate(Accessor(_target), _reference); + if(CPUInfo::get().has_fp16()) + { + validate(Accessor(_target), _reference); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunLarge, @@ -83,11 +90,18 @@ FIXTURE_DATA_TEST_CASE(RunLarge, framework::DatasetMode::NIGHTLY, combine(datasets::LargeSliceDataset(), framework::dataset::make("DataType", DataType::F16))) { - // Validate output - validate(Accessor(_target), _reference); + if(CPUInfo::get().has_fp16()) + { + validate(Accessor(_target), _reference); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. 
Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } TEST_SUITE_END() // FP16 -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp index 94d0866c38..e428d7958b 100644 --- a/tests/validation/NEON/SoftmaxLayer.cpp +++ b/tests/validation/NEON/SoftmaxLayer.cpp @@ -54,9 +54,9 @@ constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(1); /** CNN data types */ const auto CNNDataTypes = make("DataType", { -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#ifdef ARM_COMPUTE_ENABLE_FP16 DataType::F16, -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ DataType::F32, }); } // namespace @@ -157,7 +157,7 @@ DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL, } TEST_SUITE(Float) -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#ifdef ARM_COMPUTE_ENABLE_FP16 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine( @@ -166,8 +166,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture<half>, framework::Datas make("Beta", { 1.0f, 2.0f }), make("Axis", { 0, -1 }))) { - // Validate output - validate(Accessor(_target), _reference, tolerance_f16); + if(CPUInfo::get().has_fp16()) + { + validate(Accessor(_target), _reference, tolerance_f16); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine( @@ -176,8 +183,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<half>, framework::Dataset make("Beta", { 1.0f, 2.0f }), make("Axis", { 0, 1 }))) { - // Validate output - validate(Accessor(_target), _reference, tolerance_f16); + if(CPUInfo::get().has_fp16()) + { + // Validate output + validate(Accessor(_target), _reference, tolerance_f16); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine( @@ -186,8 +201,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<half>, framework::Datas make("Beta", { 1.0f }), make("Axis", { 0, 2, -1 }))) { - // Validate output - validate(Accessor(_target), _reference, tolerance_f16); + if(CPUInfo::get().has_fp16()) + { + // Validate output + validate(Accessor(_target), _reference, tolerance_f16); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine( @@ -196,11 +219,19 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture<half>, framework::Dataset make("Beta", { 1.0f, 2.0f }), make("Axis", { 0 }))) { - // Validate output - validate(Accessor(_target), _reference, tolerance_f16); + if(CPUInfo::get().has_fp16()) + { + // Validate output + validate(Accessor(_target), _reference, tolerance_f16); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. 
Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } TEST_SUITE_END() //FP16 -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, diff --git a/tests/validation/NEON/Split.cpp b/tests/validation/NEON/Split.cpp index 72df2ad663..d7aa2e532c 100644 --- a/tests/validation/NEON/Split.cpp +++ b/tests/validation/NEON/Split.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -98,17 +98,25 @@ template <typename T> using NESplitShapesFixture = SplitShapesFixture<Tensor, ITensor, Accessor, NESplit, T>; TEST_SUITE(Float) -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#ifdef ARM_COMPUTE_ENABLE_FP16 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, NESplitFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallSplitDataset(), framework::dataset::make("DataType", DataType::F16))) { - // Validate outputs - for(unsigned int i = 0; i < _target.size(); ++i) + if(CPUInfo::get().has_fp16()) { - validate(Accessor(_target[i]), _reference[i]); + // Validate outputs + for(unsigned int i = 0; i < _target.size(); ++i) + { + validate(Accessor(_target[i]), _reference[i]); + } + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); } } @@ -117,14 +125,22 @@ FIXTURE_DATA_TEST_CASE(RunLarge, framework::DatasetMode::NIGHTLY, combine(datasets::LargeSplitDataset(), framework::dataset::make("DataType", DataType::F16))) { - // Validate outputs - for(unsigned int i = 0; i < _target.size(); ++i) + if(CPUInfo::get().has_fp16()) { - validate(Accessor(_target[i]), _reference[i]); + // Validate outputs + for(unsigned int i = 0; i < _target.size(); ++i) + { + validate(Accessor(_target[i]), _reference[i]); + } + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); } } TEST_SUITE_END() // FP16 -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, diff --git a/tests/validation/NEON/StridedSlice.cpp b/tests/validation/NEON/StridedSlice.cpp index a1b3cef801..7c76800d1f 100644 --- a/tests/validation/NEON/StridedSlice.cpp +++ b/tests/validation/NEON/StridedSlice.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -69,15 +69,22 @@ template <typename T> using NEStridedSliceFixture = StridedSliceFixture<Tensor, Accessor, NEStridedSlice, T>; TEST_SUITE(Float) -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#ifdef ARM_COMPUTE_ENABLE_FP16 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, NEStridedSliceFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallStridedSliceDataset(), framework::dataset::make("DataType", DataType::F16))) { - // Validate output - validate(Accessor(_target), _reference); + if(CPUInfo::get().has_fp16()) + { + validate(Accessor(_target), _reference); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. 
Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } FIXTURE_DATA_TEST_CASE(RunLarge, @@ -85,11 +92,18 @@ FIXTURE_DATA_TEST_CASE(RunLarge, framework::DatasetMode::NIGHTLY, combine(datasets::LargeStridedSliceDataset(), framework::dataset::make("DataType", DataType::F16))) { - // Validate output - validate(Accessor(_target), _reference); + if(CPUInfo::get().has_fp16()) + { + validate(Accessor(_target), _reference); + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); + } } TEST_SUITE_END() // FP16 -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, diff --git a/tests/validation/NEON/Unstack.cpp b/tests/validation/NEON/Unstack.cpp index 3e8f1ff324..18e778b9fd 100644 --- a/tests/validation/NEON/Unstack.cpp +++ b/tests/validation/NEON/Unstack.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -95,19 +95,28 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEUnstackFixture<float>, framework::DatasetMode } TEST_SUITE_END() // F32 -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#ifdef ARM_COMPUTE_ENABLE_FP16 TEST_SUITE(F16) FIXTURE_DATA_TEST_CASE(RunSmall, NEUnstackFixture<half>, framework::DatasetMode::PRECOMMIT, unstack_dataset_small * framework::dataset::make("DataType", { DataType::F16 })) { ARM_COMPUTE_ERROR_ON(_target.size() != _reference.size()); - // Validate output - for(size_t k = 0; k < _target.size(); ++k) + + if(CPUInfo::get().has_fp16()) { - validate(Accessor(_target[k]), _reference[k]); + // Validate output + for(size_t k = 0; k < _target.size(); ++k) + { + validate(Accessor(_target[k]), _reference[k]); + } + } + else + { + ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); + framework::ARM_COMPUTE_PRINT_INFO(); } } TEST_SUITE_END() // F16 -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#endif /* ARM_COMPUTE_ENABLE_FP16 */ TEST_SUITE(Quantized) FIXTURE_DATA_TEST_CASE(RunSmall, NEUnstackFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, unstack_dataset_small * framework::dataset::make("DataType", { DataType::QASYMM8 })) diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h index 1b21967bda..1cc0a56399 100644 --- a/tests/validation/fixtures/QuantizationLayerFixture.h +++ b/tests/validation/fixtures/QuantizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h
index 1b21967bda..1cc0a56399 100644
--- a/tests/validation/fixtures/QuantizationLayerFixture.h
+++ b/tests/validation/fixtures/QuantizationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_QUANTIZATION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_QUANTIZATION_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_QUANTIZATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_QUANTIZATIONLAYERFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ class QuantizationValidationGenericFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           (data_type_in == DataType::F16 || data_type_out == DataType::F16) && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(shape, data_type_in, data_type_out, qinfo, qinfo_in);
         _reference = compute_reference(shape, data_type_in, data_type_out, qinfo, qinfo_in);
     }
@@ -116,4 +122,4 @@ public:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_QUANTIZATION_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_QUANTIZATIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/RNNLayerFixture.h b/tests/validation/fixtures/RNNLayerFixture.h
index e9a05e7838..8741ef4fae 100644
--- a/tests/validation/fixtures/RNNLayerFixture.h
+++ b/tests/validation/fixtures/RNNLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_RNN_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_RNN_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_RNNLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_RNNLAYERFIXTURE_H
 #include "tests/Globals.h"
 #include "tests/framework/Asserts.h"
@@ -45,6 +45,12 @@ public:
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape recurrent_weights_shape, TensorShape bias_shape, TensorShape output_shape, ActivationLayerInfo info,
                DataType data_type)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(input_shape, weights_shape, recurrent_weights_shape, bias_shape, output_shape, info, data_type);
         _reference = compute_reference(input_shape, weights_shape, recurrent_weights_shape, bias_shape, output_shape, info, data_type);
     }
@@ -144,4 +150,4 @@ protected:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_RNN_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_RNNLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/ROIAlignLayerFixture.h b/tests/validation/fixtures/ROIAlignLayerFixture.h
index ad76dcbbd9..fd076862dd 100644
--- a/tests/validation/fixtures/ROIAlignLayerFixture.h
+++ b/tests/validation/fixtures/ROIAlignLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_ROIALIGNLAYER_FIXTURE
-#define ARM_COMPUTE_TEST_ROIALIGNLAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ROIALIGNLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ROIALIGNLAYERFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -47,6 +47,12 @@ class ROIAlignLayerGenericFixture : public framework::Fixture
 public:
     void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _rois_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::QASYMM16 : data_type;
         _target         = compute_target(input_shape, data_type, data_layout, pool_info, rois_shape, qinfo, output_qinfo);
         _reference      = compute_reference(input_shape, data_type, pool_info, rois_shape, qinfo, output_qinfo);
@@ -209,4 +215,4 @@ public:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ROIALIGNLAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ROIALIGNLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/RangeFixture.h b/tests/validation/fixtures/RangeFixture.h
index 166613a318..50682e979e 100644
--- a/tests/validation/fixtures/RangeFixture.h
+++ b/tests/validation/fixtures/RangeFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_RANGE_FIXTURE
-#define ARM_COMPUTE_TEST_RANGE_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_RANGEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_RANGEFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -57,6 +57,12 @@ class RangeFixture : public framework::Fixture
 public:
     void setup(const DataType data_type0, float start, float step, const QuantizationInfo qinfo0 = QuantizationInfo())
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type0 == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(data_type0, qinfo0, start, step);
         _reference = compute_reference(data_type0, qinfo0, start, step);
     }
@@ -138,4 +144,4 @@ protected:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_RANGE_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_RANGEFIXTURE_H
diff --git a/tests/validation/fixtures/ReduceMeanFixture.h b/tests/validation/fixtures/ReduceMeanFixture.h
index e61941435c..9f18497095 100644
--- a/tests/validation/fixtures/ReduceMeanFixture.h
+++ b/tests/validation/fixtures/ReduceMeanFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE
-#define ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_REDUCEMEANFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_REDUCEMEANFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class ReduceMeanValidationFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output);
         _reference = compute_reference(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output);
     }
@@ -172,4 +178,4 @@ public:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_REDUCEMEANFIXTURE_H
diff --git a/tests/validation/fixtures/ReverseFixture.h b/tests/validation/fixtures/ReverseFixture.h
index 856bff7b12..5bb8f876d2 100644
--- a/tests/validation/fixtures/ReverseFixture.h
+++ b/tests/validation/fixtures/ReverseFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,6 +47,12 @@ class ReverseValidationFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, TensorShape axis_shape, DataType data_type, bool use_negative_axis = false, bool use_inverted_axis = false)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _num_dims  = shape.num_dimensions();
         _target    = compute_target(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis);
         _reference = compute_reference(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis);
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index 86d89d71f7..03a7ca6ab3 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -42,6 +42,12 @@ public:
     void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
                bool align_corners, bool mixed_layout, QuantizationInfo output_quantization_info)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _shape       = shape;
         _policy      = policy;
         _border_mode = border_mode;
diff --git a/tests/validation/fixtures/SelectFixture.h b/tests/validation/fixtures/SelectFixture.h
index 8cb6f062f9..eef86b808e 100644
--- a/tests/validation/fixtures/SelectFixture.h
+++ b/tests/validation/fixtures/SelectFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_SELECT_FIXTURE
-#define ARM_COMPUTE_TEST_SELECT_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SELECTFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SELECTFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -65,6 +65,12 @@ class SelectValidationFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, bool has_same_same_rank, DataType data_type)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         TensorShape condition_shape = detail::select_condition_shape(shape, has_same_same_rank);
         _target    = compute_target(shape, condition_shape, data_type);
@@ -144,4 +150,4 @@ protected:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SELECT_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SELECTFIXTURE_H
diff --git a/tests/validation/fixtures/SliceOperationsFixtures.h b/tests/validation/fixtures/SliceOperationsFixtures.h
index b1f91ea2e0..65b8fb88d2 100644
--- a/tests/validation/fixtures/SliceOperationsFixtures.h
+++ b/tests/validation/fixtures/SliceOperationsFixtures.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE
-#define ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SLICEOPERATIONSFIXTURES_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SLICEOPERATIONSFIXTURES_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -47,6 +47,12 @@ class SliceFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, Coordinates starts, Coordinates ends, DataType data_type)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(shape, starts, ends, data_type);
         _reference = compute_reference(shape, starts, ends, data_type);
     }
@@ -112,6 +118,12 @@ public:
                int32_t begin_mask, int32_t end_mask, int32_t shrink_mask,
                DataType data_type)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(shape, starts, ends, strides, begin_mask, end_mask, shrink_mask, data_type);
         _reference = compute_reference(shape, starts, ends, strides, begin_mask, end_mask, shrink_mask, data_type);
     }
@@ -176,4 +188,4 @@ protected:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SLICEOPERATIONSFIXTURES_H
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index f4bf8df9c0..399a8b70c4 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SOFTMAXLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SOFTMAXLAYERFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ class SoftmaxValidationGenericFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta, size_t axis)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _quantization_info = quantization_info;
         _reference = compute_reference(shape, data_type, quantization_info, beta, axis);
@@ -157,4 +163,4 @@ public:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SOFTMAXLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/SplitFixture.h b/tests/validation/fixtures/SplitFixture.h
index 203925329c..79ce152671 100644
--- a/tests/validation/fixtures/SplitFixture.h
+++ b/tests/validation/fixtures/SplitFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_SPLIT_FIXTURE
-#define ARM_COMPUTE_TEST_SPLIT_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SPLITFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SPLITFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class SplitFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, unsigned int axis, unsigned int splits, DataType data_type)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(shape, axis, splits, data_type);
         _reference = compute_reference(shape, axis, splits, data_type);
     }
@@ -150,6 +156,12 @@ class SplitShapesFixture : public framework::Fixture
 public:
     void setup(TensorShape shape, unsigned int axis, std::vector<TensorShape> split_shapes, DataType data_type)
     {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(shape, axis, split_shapes, data_type);
         _reference = compute_reference(shape, axis, split_shapes, data_type);
     }
@@ -254,4 +266,4 @@ protected:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SPLIT_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SPLITFIXTURE_H
diff --git a/tests/validation/fixtures/UnstackFixture.h b/tests/validation/fixtures/UnstackFixture.h
index 30b7dd5539..b543ea263c 100644
--- a/tests/validation/fixtures/UnstackFixture.h
+++ b/tests/validation/fixtures/UnstackFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_UNSTACK_FIXTURE
-#define ARM_COMPUTE_TEST_UNSTACK_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_UNSTACKFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_UNSTACKFIXTURE_H
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class UnstackValidationFixture : public framework::Fixture
 public:
     void setup(TensorShape input_shape, int axis, int num, DataType data_type)
    {
+        if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+        {
+            return;
+        }
+
         _target    = compute_target(input_shape, axis, num, data_type);
         _reference = compute_reference(input_shape, axis, num, data_type);
     }
@@ -114,4 +120,4 @@ protected:
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_UNSTACK_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_UNSTACKFIXTURE_H
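Alongside the functional change, the touched fixture headers also adopt the ACL_TESTS_VALIDATION_FIXTURES_*_H include-guard naming scheme and extend their copyright years to 2024. The skip branch itself is repeated verbatim in every fp16 test case; a follow-up could factor it into a helper, for example as the hypothetical macro sketched below (not part of this patch):

    // Hypothetical convenience macro (not in this patch): run VALIDATION only
    // when the CPU reports fp16 support, otherwise log the standard skip message.
    #define VALIDATE_IF_FP16(VALIDATION)                                                            \
        if(CPUInfo::get().has_fp16())                                                               \
        {                                                                                           \
            VALIDATION;                                                                             \
        }                                                                                           \
        else                                                                                        \
        {                                                                                           \
            ARM_COMPUTE_TEST_INFO("Device does not support fp16 vector operations. Test SKIPPED."); \
            framework::ARM_COMPUTE_PRINT_INFO();                                                    \
        }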