| author    | giuros01 <giuseppe.rossini@arm.com>                             | 2019-05-14 16:12:53 +0100 |
|-----------|------------------------------------------------------------------|---------------------------|
| committer | Giuseppe Rossini <giuseppe.rossini@arm.com>                      | 2019-05-24 09:20:27 +0000 |
| commit    | 1e6e1b899c1a88d3466cdc6fd097ccf32ff767e3 (patch)                 |                           |
| tree      | 124d00f9b2bd26ddbb5edf8f3bc7102e7494d404 /tests/validation       |                           |
| parent    | 048b0f300ee729cac1b3019311589c654771fb8f (diff)                  |                           |
| download  | ComputeLibrary-1e6e1b899c1a88d3466cdc6fd097ccf32ff767e3.tar.gz   |                           |
COMPMID-2322: PRELU support in CLActivationLayer
Change-Id: I3aa8d4964a1861c2b8acef59dc863df7c48f3316
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1146
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation')
| -rw-r--r-- | tests/validation/CL/PReluLayer.cpp                        | 258 |
| -rw-r--r-- | tests/validation/fixtures/ElementwiseOperationsFixture.h  |  56 |
| -rw-r--r-- | tests/validation/reference/ElementwiseOperations.cpp      |   7 |
3 files changed, 321 insertions, 0 deletions
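
The reference path added in ElementwiseOperations.cpp (last hunk of the diff below) computes PReLU element-wise: the output equals x when x > 0 and alpha * x otherwise, with alpha supplied as a second input tensor. The following is a minimal standalone sketch of that behaviour, not part of the patch; the helper name `prelu_reference` and the example values are illustrative only.

```cpp
// Illustrative sketch of element-wise PReLU: out = x if x > 0, else alpha * x.
#include <cstddef>
#include <iostream>
#include <vector>

template <typename T>
std::vector<T> prelu_reference(const std::vector<T> &x, const std::vector<T> &alpha)
{
    std::vector<T> out(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        // Positive inputs pass through; negative inputs are scaled by alpha.
        out[i] = (x[i] > T(0)) ? x[i] : alpha[i] * x[i];
    }
    return out;
}

int main()
{
    const std::vector<float> x{ -2.0f, -0.5f, 0.0f, 1.5f };
    const std::vector<float> alpha{ 0.25f, 0.25f, 0.25f, 0.25f };
    for(float v : prelu_reference(x, alpha))
    {
        std::cout << v << ' '; // prints: -0.5 -0.125 0 1.5
    }
    std::cout << '\n';
    return 0;
}
```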
diff --git a/tests/validation/CL/PReluLayer.cpp b/tests/validation/CL/PReluLayer.cpp
new file mode 100644
index 0000000000..32fb2a113b
--- /dev/null
+++ b/tests/validation/CL/PReluLayer.cpp
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLPReluLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ConvertPolicyDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ElementwiseOperationsFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+RelativeTolerance<float> tolerance_fp32(0.000001f);
+RelativeTolerance<float> tolerance_fp16(0.001f);
+
+constexpr unsigned int num_elems_processed_per_iteration = 16;
+/** Input data sets **/
+const auto PReluLayerU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)),
+                                         framework::dataset::make("DataType",
+                                                                  DataType::U8));
+const auto PReluLayerQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)),
+                                              framework::dataset::make("DataType",
+                                                                       DataType::QASYMM8));
+const auto PReluLayerS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
+                                          framework::dataset::make("DataType", DataType::S16));
+const auto PReluLayerFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
+                                           framework::dataset::make("DataType", DataType::F16));
+const auto PReluLayerFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
+                                           framework::dataset::make("DataType", DataType::F32));
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(PReluLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+               framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink
+                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
+                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+                                                      }),
+               framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
+                                                       TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
+                                                     })),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                       TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
+                                                     })),
+               framework::dataset::make("Expected", { true, true, false, false, false})),
+               input1_info, input2_info, output_info, expected)
+{
+    ARM_COMPUTE_EXPECT(bool(CLPReluLayer::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using CLPReluLayerFixture = PReluLayerValidationFixture<CLTensor, CLAccessor, CLPReluLayer, T>;
+
+TEST_SUITE(U8)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes(),
+               shape)
+{
+    // Create tensors
+    CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::U8);
+    CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::U8);
+    CLTensor dst      = create_tensor<CLTensor>(shape, DataType::U8);
+
+    // Create and Configure function
+    CLPReluLayer prelu;
+    prelu.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
+    validate(ref_src1.info()->padding(), padding);
+    validate(ref_src2.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPReluLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), PReluLayerU8Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+template <typename T>
+using CLPReluLayerQuantizedFixture = PReluLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPReluLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes(),
+               shape)
+{
+    // Create tensors
+    CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::QASYMM8);
+    CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::QASYMM8);
+    CLTensor dst      = create_tensor<CLTensor>(shape, DataType::QASYMM8);
+
+    // Create and Configure function
+    CLPReluLayer prelu;
+    prelu.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
+    validate(ref_src1.info()->padding(), padding);
+    validate(ref_src2.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPReluLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+                       PReluLayerQASYMM8Dataset),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) }))
+
+                      )
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(S16)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::U8, DataType::S16 })),
+               shape, data_type)
+{
+    // Create tensors
+    CLTensor ref_src1 = create_tensor<CLTensor>(shape, data_type);
+    CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::S16);
+    CLTensor dst      = create_tensor<CLTensor>(shape, DataType::S16);
+
+    // Create and Configure function
+    CLPReluLayer prelu;
+    prelu.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
+    validate(ref_src1.info()->padding(), padding);
+    validate(ref_src2.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPReluLayerFixture<int16_t>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), PReluLayerS16Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPReluLayerFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), PReluLayerFP16Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes(),
+               shape)
+{
+    // Create tensors
+    CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::F32);
+    CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::F32);
+    CLTensor dst      = create_tensor<CLTensor>(shape, DataType::F32);
+
+    // Create and Configure function
+    CLPReluLayer prelu;
+    prelu.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
+    validate(ref_src1.info()->padding(), padding);
+    validate(ref_src2.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPReluLayerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), PReluLayerFP32Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+template <typename T>
+using CLPReluLayerBroadcastFixture = PReluLayerBroadcastValidationFixture<CLTensor, CLAccessor, CLPReluLayer, T>;
+
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLPReluLayerBroadcastFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(),
+                       PReluLayerFP32Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/ElementwiseOperationsFixture.h b/tests/validation/fixtures/ElementwiseOperationsFixture.h
index e86e7a0f20..de61c487e6 100644
--- a/tests/validation/fixtures/ElementwiseOperationsFixture.h
+++ b/tests/validation/fixtures/ElementwiseOperationsFixture.h
@@ -337,6 +337,62 @@ public:
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PReluLayerBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape0, shape1,
+                                                                                              data_type0, data_type1, output_data_type,
+                                                                                              QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PReluLayerValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape, shape,
+                                                                                              data_type0, data_type1, output_data_type,
+                                                                                              QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PReluLayerValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
+               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape, shape,
+                                                                                              data_type0, data_type1, output_data_type,
+                                                                                              qinfo0, qinfo1, qinfo_out);
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PReluLayerQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
+               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape0, shape1,
+                                                                                              data_type0, data_type1, output_data_type,
+                                                                                              qinfo0, qinfo1, qinfo_out);
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class ElementwiseDivisionBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
 {
 public:
diff --git a/tests/validation/reference/ElementwiseOperations.cpp b/tests/validation/reference/ElementwiseOperations.cpp
index 82f42a0c21..44eb417969 100644
--- a/tests/validation/reference/ElementwiseOperations.cpp
+++ b/tests/validation/reference/ElementwiseOperations.cpp
@@ -81,6 +81,13 @@ T arithm_op(ArithmeticOperation op, T src1, T src2, ConvertPolicy convert_policy
             val = std::pow(static_cast<intermediate_type>(src1), static_cast<intermediate_type>(src2));
             break;
         }
+        case ArithmeticOperation::PRELU:
+        {
+            const T x     = static_cast<intermediate_type>(src1);
+            const T alpha = static_cast<intermediate_type>(src2);
+            val           = (x > 0 ? x : alpha * x);
+            break;
+        }
         default:
         {
             ARM_COMPUTE_ERROR("Not handled");