diff options
author | Gunes Bayir <gunes.bayir@arm.com> | 2023-01-19 15:56:00 +0000 |
---|---|---|
committer | Gunes Bayir <gunes.bayir@arm.com> | 2023-01-24 09:40:01 +0000 |
commit | cc2877368d5e15d9ea89d31c84ec651fc0fffd13 (patch) | |
tree | c57a3a406125b3a31e2d4aff6126ce99f4ade395 /tests/validation/dynamic_fusion/gpu/cl/Clamp.cpp | |
parent | a6a153817302793732e28b07c3b4046df3f91a60 (diff) | |
download | ComputeLibrary-cc2877368d5e15d9ea89d31c84ec651fc0fffd13.tar.gz |
Change dynamic fusion API to return destination tensor info
The new dynamic fusion API is introduced in the following patch:
https://review.mlplatform.org/c/ml/ComputeLibrary/+/8906
For each operator (except Conv2D, which is migrated in the above patch), we
- remove destination tensor from is_supported, validate and create calls
- make create_op return ITensorInfo* to the intermediate destination object
Affected operators:
- DepthwiseConv2D
- Cast
- Elementwise Ops
- Clamp
- Reshape
- Resize
Resolves: COMPMID-5777
Change-Id: Ib60ec8a5f081752808455d7a7d790f2ed0627059
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8991
Reviewed-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Dynamic-Fusion: Ramy Elgammal <ramy.elgammal@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/dynamic_fusion/gpu/cl/Clamp.cpp')
-rw-r--r-- | tests/validation/dynamic_fusion/gpu/cl/Clamp.cpp | 23 |
1 file changed, 5 insertions(+), 18 deletions(-)
diff --git a/tests/validation/dynamic_fusion/gpu/cl/Clamp.cpp b/tests/validation/dynamic_fusion/gpu/cl/Clamp.cpp
index 947201ff97..177c02c2c7 100644
--- a/tests/validation/dynamic_fusion/gpu/cl/Clamp.cpp
+++ b/tests/validation/dynamic_fusion/gpu/cl/Clamp.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -51,33 +51,21 @@ TEST_SUITE(DYNAMIC_FUSION)
 TEST_SUITE(CLAMP)
 // *INDENT-OFF*
 // clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Minimum value larger than maximum value
                                                      }),
-               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
-                                                       TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
-                                                     })),
                framework::dataset::make("MinVal", { 0.2f,
                                                     1.5f,
-                                                    0.1f,
-                                                    3.0f,
                                                     9.0f,
                                                   })),
                framework::dataset::make("MaxVal", { 0.5f,
                                                     2.0f,
                                                     1.0f,
-                                                    4.0f,
-                                                    1.0f,
                                                   })),
-               framework::dataset::make("Expected", { true, true, false, false, false })),
-               input_info, output_info, min_val, max_val, expected)
+               framework::dataset::make("Expected", { true, true, false })),
+               input_info, min_val, max_val, expected)
 {
     // Create a new workload sketch
     CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
@@ -86,13 +74,12 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
 
     // Fuse Clamp
     const TensorInfo src_info = sketch.create_tensor_info(input_info);
-    const TensorInfo dst_info = sketch.create_tensor_info(output_info);
 
     ClampAttributes attributes {};
     attributes.min_val(min_val)
               .max_val(max_val);
 
-    const bool res = static_cast<bool>(GpuClamp::validate_op(sketch, &src_info, &dst_info, attributes));
+    const bool res = static_cast<bool>(GpuClamp::validate_op(sketch, &src_info, attributes));
     ARM_COMPUTE_EXPECT(res == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on