Diffstat (limited to 'tests/validation/CL/PReluLayer.cpp')
-rw-r--r--  tests/validation/CL/PReluLayer.cpp | 73
1 file changed, 66 insertions(+), 7 deletions(-)
diff --git a/tests/validation/CL/PReluLayer.cpp b/tests/validation/CL/PReluLayer.cpp
index 82436a9671..f3f1c8b1b8 100644
--- a/tests/validation/CL/PReluLayer.cpp
+++ b/tests/validation/CL/PReluLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,7 +56,7 @@ const auto PReluLayerQASYMM8Dataset = combine(combine(framework::dataset::make("
const auto PReluLayerQASYMM8SIGNEDDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
framework::dataset::make("DataType",
DataType::QASYMM8_SIGNED));
-const auto PReluLayerS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
+const auto PReluLayerS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
framework::dataset::make("DataType", DataType::S16));
const auto PReluLayerFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataType", DataType::F16));
@@ -71,21 +71,18 @@ TEST_SUITE(PReluLayer)
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
})),
- framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
})),
- framework::dataset::make("Expected", { true, true, false, false})),
+ framework::dataset::make("Expected", { true, false, false})),
input1_info, input2_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLPReluLayer::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
@@ -93,6 +90,63 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// clang-format on
// *INDENT-ON*
+TEST_SUITE(InPlace)
+TEST_CASE(Validate, framework::DatasetMode::ALL)
+{
+ // The PRelu operator should be able to take nullptr as output and do the in-place computation.
+ // Shape and data type are selected randomly since they shouldn't matter
+ const auto tensor_info = TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32);
+ const auto result = arm_compute::CLPReluLayer::validate(&tensor_info, &tensor_info, nullptr);
+ ARM_COMPUTE_EXPECT(bool(result) == true, framework::LogLevel::ERRORS);
+}
+
+SimpleTensor<float> compute_float_reference(const TensorInfo &tensor_info)
+{
+ SimpleTensor<float> ref_src1{ tensor_info.tensor_shape(), tensor_info.data_type() };
+ SimpleTensor<float> ref_src2{ tensor_info.tensor_shape(), tensor_info.data_type() };
+ SimpleTensor<float> ref_dst{ tensor_info.tensor_shape(), tensor_info.data_type() };
+
+ library->fill_tensor_uniform(ref_src1, 0);
+ library->fill_tensor_uniform(ref_src2, 1);
+
+ return reference::arithmetic_operation<float>(ArithmeticOperation::PRELU, ref_src1, ref_src2, ref_dst);
+}
+
+void compute_float_target_in_place(CLTensor &src1, CLTensor &src2, bool use_nullptr_output)
+{
+ auto fn = arm_compute::CLPReluLayer{};
+ fn.configure(&src1, &src2, use_nullptr_output ? nullptr : &src1);
+
+ src1.allocator()->allocate();
+ src2.allocator()->allocate();
+
+ library->fill_tensor_uniform(CLAccessor(src1), 0);
+ library->fill_tensor_uniform(CLAccessor(src2), 1);
+
+ fn.run();
+}
+
+TEST_CASE(ComputeWithNullPtr, framework::DatasetMode::ALL)
+{
+ const auto tensor_info = TensorInfo(TensorShape(33U, 13U, 2U), 1, DataType::F32);
+
+ auto src1 = create_tensor<CLTensor>(tensor_info);
+ auto src2 = create_tensor<CLTensor>(tensor_info);
+ compute_float_target_in_place(src1, src2, true);
+ validate(CLAccessor(src1), compute_float_reference(tensor_info));
+}
+
+TEST_CASE(ComputeWithSameTensor, framework::DatasetMode::ALL)
+{
+ const auto tensor_info = TensorInfo(TensorShape(33U, 13U, 2U), 1, DataType::F32);
+
+ auto src1 = create_tensor<CLTensor>(tensor_info);
+ auto src2 = create_tensor<CLTensor>(tensor_info);
+ compute_float_target_in_place(src1, src2, false);
+ validate(CLAccessor(src1), compute_float_reference(tensor_info));
+}
+TEST_SUITE_END() // InPlace
+
template <typename T>
using CLPReluLayerFixture = PReluLayerValidationFixture<CLTensor, CLAccessor, CLPReluLayer, T>;
@@ -143,6 +197,11 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPReluLayerFixture<int16_t>, framework::Datase
// Validate output
validate(CLAccessor(_target), _reference);
}
+FIXTURE_DATA_TEST_CASE(RunOneDimensional, CLPReluLayerFixture<int16_t>, framework::DatasetMode::ALL, combine(framework::dataset::make("Shape", TensorShape(1U, 16U)), PReluLayerS16Dataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
TEST_SUITE_END()
TEST_SUITE(Float)
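
For context on the in-place behaviour this patch enables, below is a minimal, self-contained sketch of how a caller might use CLPReluLayer with a nullptr output outside the test framework. It is not part of the patch; the shape, data type and the fill step are arbitrary, and it assumes the standard ComputeLibrary CL runtime setup (CLScheduler/CLTensor).

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    // Initialise the default OpenCL context and command queue.
    CLScheduler::get().default_init();

    // Input and alpha (slope) tensors; shape and data type are arbitrary here.
    CLTensor input;
    CLTensor alpha;
    input.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32));
    alpha.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32));

    // Passing nullptr as the output requests in-place computation:
    // the result overwrites 'input', as exercised by the ComputeWithNullPtr test above.
    CLPReluLayer prelu;
    prelu.configure(&input, &alpha, nullptr);

    input.allocator()->allocate();
    alpha.allocator()->allocate();

    // ... map the tensors and fill 'input' and 'alpha' with data ...

    prelu.run();
    CLScheduler::get().sync();

    return 0;
}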