author    Gunes Bayir <gunes.bayir@arm.com>  2023-01-19 15:56:00 +0000
committer Gunes Bayir <gunes.bayir@arm.com>  2023-01-24 09:40:01 +0000
commit    cc2877368d5e15d9ea89d31c84ec651fc0fffd13 (patch)
tree      c57a3a406125b3a31e2d4aff6126ce99f4ade395 /tests/validation/dynamic_fusion/gpu/Integration.cpp
parent    a6a153817302793732e28b07c3b4046df3f91a60 (diff)
Change dynamic fusion API to return destination tensor info
The new dynamic fusion API is introduced in the following patch:
https://review.mlplatform.org/c/ml/ComputeLibrary/+/8906

For each operator (except Conv2D, which is migrated in the above patch), we
- remove the destination tensor from is_supported, validate and create calls
- make create_op return ITensorInfo* to the intermediate destination object

Affected operators:
- DepthwiseConv2D
- Cast
- Elementwise Ops
- Clamp
- Reshape
- Resize

Resolves: COMPMID-5777

Change-Id: Ib60ec8a5f081752808455d7a7d790f2ed0627059
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8991
Reviewed-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Dynamic-Fusion: Ramy Elgammal <ramy.elgammal@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
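To illustrate the change, a minimal before/after sketch of the calling convention, distilled from the test diff below (surrounding context and runtime setup are omitted; see the hunks for the exact code):

// Before this patch: the caller pre-created every intermediate TensorInfo
// and passed it to create_op as the destination.
auto ans_0_info = sketch.create_tensor_info();
GpuAdd::create_op(sketch, &in_0_info, &in_1_info, &ans_0_info);
GpuOutput::create_op(sketch, &ans_0_info, &out_0_info);

// After this patch: create_op returns an ITensorInfo* for the intermediate
// destination, so the caller only creates the real inputs and outputs.
TensorInfo in_0_info  = sketch.create_tensor_info(t_input_shape, 1, data_type);
TensorInfo in_1_info  = sketch.create_tensor_info(t_input_shape, 1, data_type);
TensorInfo out_0_info = sketch.create_tensor_info();

ITensorInfo *ans_0_info = GpuAdd::create_op(sketch, &in_0_info, &in_1_info);
GpuOutput::create_op(sketch, ans_0_info, &out_0_info);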
Diffstat (limited to 'tests/validation/dynamic_fusion/gpu/Integration.cpp')
-rw-r--r--  tests/validation/dynamic_fusion/gpu/Integration.cpp  |  48
1 file changed, 20 insertions(+), 28 deletions(-)
diff --git a/tests/validation/dynamic_fusion/gpu/Integration.cpp b/tests/validation/dynamic_fusion/gpu/Integration.cpp
index a70f512f9f..7f2d439183 100644
--- a/tests/validation/dynamic_fusion/gpu/Integration.cpp
+++ b/tests/validation/dynamic_fusion/gpu/Integration.cpp
@@ -158,20 +158,17 @@ TEST_CASE(Add_Output_Add_Output, framework::DatasetMode::ALL)
auto gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
GpuWorkloadSketch sketch{ &gpu_ctx };
- auto in_0_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
- auto in_1_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
- auto in_2_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
+ TensorInfo in_0_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
+ TensorInfo in_1_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
+ TensorInfo in_2_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
- auto out_0_info = sketch.create_tensor_info();
- auto out_1_info = sketch.create_tensor_info();
+ TensorInfo out_0_info = sketch.create_tensor_info();
+ TensorInfo out_1_info = sketch.create_tensor_info();
- auto ans_0_info = sketch.create_tensor_info();
- auto ans_1_info = sketch.create_tensor_info();
-
- GpuAdd::create_op(sketch, &in_0_info, &in_1_info, &ans_0_info);
- GpuOutput::create_op(sketch, &ans_0_info, &out_0_info);
- GpuAdd::create_op(sketch, &ans_0_info, &in_2_info, &ans_1_info);
- GpuOutput::create_op(sketch, &ans_1_info, &out_1_info);
+ ITensorInfo *ans_0_info = GpuAdd::create_op(sketch, &in_0_info, &in_1_info);
+ GpuOutput::create_op(sketch, ans_0_info, &out_0_info);
+ ITensorInfo *ans_1_info = GpuAdd::create_op(sketch, ans_0_info, &in_2_info);
+ GpuOutput::create_op(sketch, ans_1_info, &out_1_info);
// Configure runtime
ClWorkloadRuntime runtime;
@@ -257,17 +254,12 @@ TEST_CASE(Add_Output_Add_Cast_Cast_Output, framework::DatasetMode::ALL)
auto gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
GpuWorkloadSketch sketch{ &gpu_ctx };
- auto in_0_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
- auto in_1_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
- auto in_2_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
-
- auto out_0_info = sketch.create_tensor_info();
- auto out_1_info = sketch.create_tensor_info();
+ TensorInfo in_0_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
+ TensorInfo in_1_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
+ TensorInfo in_2_info = sketch.create_tensor_info(t_input_shape, 1, data_type);
- auto ans_0_info = sketch.create_tensor_info();
- auto ans_1_info = sketch.create_tensor_info();
- auto ans_2_info = sketch.create_tensor_info();
- auto ans_3_info = sketch.create_tensor_info();
+ TensorInfo out_0_info = sketch.create_tensor_info();
+ TensorInfo out_1_info = sketch.create_tensor_info();
CastAttributes cast_0_attr;
cast_0_attr.data_type(DataType::S32).convert_policy(ConvertPolicy::SATURATE);
@@ -275,12 +267,12 @@ TEST_CASE(Add_Output_Add_Cast_Cast_Output, framework::DatasetMode::ALL)
CastAttributes cast_1_attr;
cast_1_attr.data_type(DataType::F32).convert_policy(ConvertPolicy::SATURATE);
- GpuAdd::create_op(sketch, &in_0_info, &in_1_info, &ans_0_info);
- GpuOutput::create_op(sketch, &ans_0_info, &out_0_info);
- GpuAdd::create_op(sketch, &ans_0_info, &in_2_info, &ans_1_info);
- GpuCast::create_op(sketch, &ans_1_info, &ans_2_info, cast_0_attr);
- GpuCast::create_op(sketch, &ans_2_info, &ans_3_info, cast_1_attr);
- GpuOutput::create_op(sketch, &ans_3_info, &out_1_info);
+ ITensorInfo *ans_0_info = GpuAdd::create_op(sketch, &in_0_info, &in_1_info);
+ GpuOutput::create_op(sketch, ans_0_info, &out_0_info);
+ ITensorInfo *ans_1_info = GpuAdd::create_op(sketch, ans_0_info, &in_2_info);
+ ITensorInfo *ans_2_info = GpuCast::create_op(sketch, ans_1_info, cast_0_attr);
+ ITensorInfo *ans_3_info = GpuCast::create_op(sketch, ans_2_info, cast_1_attr);
+ GpuOutput::create_op(sketch, ans_3_info, &out_1_info);
// Configure runtime
ClWorkloadRuntime runtime;