path: root/tests
author     Sang-Hoon Park <sang-hoon.park@arm.com>   2021-03-02 09:41:13 +0000
committer  Sang-Hoon Park <sang-hoon.park@arm.com>   2021-03-03 15:44:15 +0000
commit     5ff38da7e18e91243a7f6b8e642f8b40f5846068 (patch)
tree       f0e1ef0fa2ddeca0e173de33d6d526387d5a4185 /tests
parent     473cb01e84cef6cab057e9492bfa3b68f708e5d7 (diff)
download   ComputeLibrary-5ff38da7e18e91243a7f6b8e642f8b40f5846068.tar.gz
Create ClPRelu operator
Make the class that was in the experimental namespace a ClOperator, to prepare porting to the new interface. As part of this change, in-place computation is now handled correctly, in line with the class description, and test cases covering in-place computation are added.

Partially Implements: COMPMID-4184

Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Change-Id: I71c18ab47fe0370a2060d5303a58ff3650c0093f
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5201
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
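For context, the in-place behaviour this patch validates can be sketched as follows. This sketch is not part of the patch; it mirrors the new test cases below and assumes a CL context has already been set up (e.g. via CLScheduler::get().default_init()). Names and the tensor shape are arbitrary.

#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

void run_prelu_in_place()
{
    using namespace arm_compute;

    CLScheduler::get().default_init(); // create a default OpenCL context and queue

    // Shape and data type are arbitrary; alpha uses the same shape for simplicity.
    CLTensor input{}, alpha{};
    input.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32));
    alpha.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32));

    CLPReluLayer prelu{};
    prelu.configure(&input, &alpha, nullptr); // nullptr output requests in-place computation

    input.allocator()->allocate();
    alpha.allocator()->allocate();
    // ... fill input and alpha ...

    prelu.run(); // the result is written back into 'input'
}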
Diffstat (limited to 'tests')
-rw-r--r--   tests/Utils.h                        | 23
-rw-r--r--   tests/validation/CL/PReluLayer.cpp   | 59
2 files changed, 75 insertions, 7 deletions
diff --git a/tests/Utils.h b/tests/Utils.h
index ab256032b0..2569c41a9e 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -514,6 +514,21 @@ inline bool is_in_valid_region(const ValidRegion &valid_region, Coordinates coor
/** Create and initialize a tensor of the given type.
*
+ * @param[in] info Tensor information to be used to create the tensor
+ * @param[in] ctx (Optional) Pointer to the runtime context.
+ *
+ * @return Initialized tensor of given type.
+ */
+template <typename T>
+inline T create_tensor(const TensorInfo &info, IRuntimeContext *ctx = nullptr)
+{
+ T tensor(ctx);
+ tensor.allocator()->init(info);
+ return tensor;
+}
+
+/** Create and initialize a tensor of the given type.
+ *
* @param[in] shape Tensor shape.
* @param[in] data_type Data type.
* @param[in] num_channels (Optional) Number of channels.
@@ -531,9 +546,8 @@ inline T create_tensor(const TensorShape &shape, DataType data_type, int num_cha
TensorInfo info(shape, num_channels, data_type);
info.set_quantization_info(quantization_info);
info.set_data_layout(data_layout);
- tensor.allocator()->init(info);
- return tensor;
+ return create_tensor<T>(info, ctx);
}
/** Create and initialize a tensor of the given type.
@@ -549,10 +563,7 @@ inline T create_tensor(const TensorShape &shape, Format format, IRuntimeContext
{
TensorInfo info(shape, format);
- T tensor(ctx);
- tensor.allocator()->init(info);
-
- return tensor;
+ return create_tensor<T>(info, ctx);
}
/** Create and initialize a multi-image of the given type.
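Not part of the patch: the new TensorInfo-based overload added above is what the PReluLayer test cases below call. A minimal call-site sketch (names arbitrary) looks like:

    // Build the TensorInfo first, then hand it to the new overload;
    // the runtime-context argument defaults to nullptr.
    const TensorInfo info(TensorShape(33U, 13U, 2U), 1, DataType::F32);
    auto tensor = create_tensor<CLTensor>(info);
    tensor.allocator()->allocate();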
diff --git a/tests/validation/CL/PReluLayer.cpp b/tests/validation/CL/PReluLayer.cpp
index 82436a9671..82f3e4f806 100644
--- a/tests/validation/CL/PReluLayer.cpp
+++ b/tests/validation/CL/PReluLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -93,6 +93,63 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// clang-format on
// *INDENT-ON*
+TEST_SUITE(InPlace)
+TEST_CASE(Validate, framework::DatasetMode::ALL)
+{
+ // The PRelu operator should be able to take nullptr as output and do the in-place computation.
+ // Shape and data type are chosen arbitrarily since they shouldn't matter here.
+ const auto tensor_info = TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32);
+ const auto result = arm_compute::CLPReluLayer::validate(&tensor_info, &tensor_info, nullptr);
+ ARM_COMPUTE_EXPECT(bool(result) == true, framework::LogLevel::ERRORS);
+}
+
+SimpleTensor<float> compute_float_reference(const TensorInfo &tensor_info)
+{
+ SimpleTensor<float> ref_src1{ tensor_info.tensor_shape(), tensor_info.data_type() };
+ SimpleTensor<float> ref_src2{ tensor_info.tensor_shape(), tensor_info.data_type() };
+ SimpleTensor<float> ref_dst{ tensor_info.tensor_shape(), tensor_info.data_type() };
+
+ library->fill_tensor_uniform(ref_src1, 0);
+ library->fill_tensor_uniform(ref_src2, 1);
+
+ return reference::arithmetic_operation<float>(ArithmeticOperation::PRELU, ref_src1, ref_src2, ref_dst);
+}
+
+void compute_float_target_in_place(CLTensor &src1, CLTensor &src2, bool use_nullptr_output)
+{
+ auto fn = arm_compute::CLPReluLayer{};
+ fn.configure(&src1, &src2, use_nullptr_output ? nullptr : &src1);
+
+ src1.allocator()->allocate();
+ src2.allocator()->allocate();
+
+ library->fill_tensor_uniform(CLAccessor(src1), 0);
+ library->fill_tensor_uniform(CLAccessor(src2), 1);
+
+ fn.run();
+}
+
+TEST_CASE(ComputeWithNullPtr, framework::DatasetMode::ALL)
+{
+ const auto tensor_info = TensorInfo(TensorShape(33U, 13U, 2U), 1, DataType::F32);
+
+ auto src1 = create_tensor<CLTensor>(tensor_info);
+ auto src2 = create_tensor<CLTensor>(tensor_info);
+ compute_float_target_in_place(src1, src2, true);
+ validate(CLAccessor(src1), compute_float_reference(tensor_info));
+}
+
+TEST_CASE(ComputeWithSameTensor, framework::DatasetMode::ALL)
+{
+ const auto tensor_info = TensorInfo(TensorShape(33U, 13U, 2U), 1, DataType::F32);
+
+ auto src1 = create_tensor<CLTensor>(tensor_info);
+ auto src2 = create_tensor<CLTensor>(tensor_info);
+ compute_float_target_in_place(src1, src2, false);
+ validate(CLAccessor(src1), compute_float_reference(tensor_info));
+}
+TEST_SUITE_END() // InPlace
+
template <typename T>
using CLPReluLayerFixture = PReluLayerValidationFixture<CLTensor, CLAccessor, CLPReluLayer, T>;