author     Ramy Elgammal <ramelg01@e129512.arm.com>   2023-01-11 18:48:04 +0000
committer  Ramy Elgammal <ramy.elgammal@arm.com>      2023-01-25 10:24:23 +0000
commit     002e6530f6218b00a28aef9be8b21efb08cf3602 (patch)
tree       f3e2f9d064b985ffe283512825b34cdd59f29f50 /src/dynamic_fusion/runtime/gpu
parent     cc2877368d5e15d9ea89d31c84ec651fc0fffd13 (diff)
download   ComputeLibrary-002e6530f6218b00a28aef9be8b21efb08cf3602.tar.gz
Implement dynamic fusion softmax operator
- Return aux tensorInfo by get_aux_tensors() at runtime to init the aux tensor with the right size.
- Keep softmax unfusable for this commit.
- Hence, added Tensor3D to the template writer arguments declaration, for the sake of keeping the dynamic fusion softmax components' kernels matching their cl counterparts.

Resolves: COMPMID-5523
Change-Id: I667f39545db925f667036ef448302c79a0330373
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/483924
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: bsgcomp <bsgcomp@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8986
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/dynamic_fusion/runtime/gpu')
-rw-r--r--  src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp    2
-rw-r--r--  src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp  8
2 files changed, 5 insertions, 5 deletions
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
index 022d4685fe..b3ec39362c 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
@@ -41,7 +41,7 @@ void ClKernelRuntime::configure(const ClCompileContext &compile_ctx, const GpuKe
// Create kernel from kernel source string
opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
_kernel = static_cast<cl::Kernel>(compile_ctx.create_kernel(code.name(),
- "" /* Program name: Used to as part of a unique string for built kernel cache. Not needed */,
+ code.name(), // program name has to be provided to differentiate between different unfusable components' kernels.
code.code(),
klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */,
code.build_options().options(),
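
Why the program name now matters: the built-kernel cache is keyed on a name string, so passing "" makes every unfusable component share one cache entry even though their kernel sources differ. Below is a minimal, hypothetical sketch of that keying idea; ProgramCache and BuiltProgram are illustrative names, not the actual CLCompileContext internals.

    #include <map>
    #include <string>

    // Hypothetical stand-in for a compiled OpenCL program.
    struct BuiltProgram
    {
        std::string source;
    };

    // Illustrative name-keyed cache: if every caller passes "" as the program name,
    // the first program built under that name is returned for all later lookups,
    // even when the kernel source differs (as it does between unfusable components).
    class ProgramCache
    {
    public:
        const BuiltProgram &get_or_build(const std::string &program_name, const std::string &source)
        {
            auto it = _programs.find(program_name);
            if(it != _programs.end())
            {
                return it->second; // Cache hit keyed on name only; the new source is ignored.
            }
            return _programs.emplace(program_name, BuiltProgram{ source }).first->second;
        }

    private:
        std::map<std::string, BuiltProgram> _programs;
    };

Passing code.name() as the program name, as the hunk above does, gives each component's kernel its own cache key.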
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
index 7e427fef72..cd21b10180 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -355,12 +355,12 @@ Status ClWorkloadRuntime::run(const std::vector<CLTensor *> &tensors)
return Status{};
}
-std::vector<std::pair<CLTensor *, AuxMemoryInfo>> ClWorkloadRuntime::get_auxiliary_tensors()
+std::vector<std::tuple<CLTensor *, TensorInfo, AuxMemoryInfo>> ClWorkloadRuntime::get_auxiliary_tensors()
{
- std::vector<std::pair<CLTensor *, AuxMemoryInfo>> aux_tensors;
+ std::vector<std::tuple<CLTensor *, TensorInfo, AuxMemoryInfo>> aux_tensors;
for(const auto &data : _impl->_aux_tensors.get_tensors())
{
- aux_tensors.emplace_back(data.tensor, data.memory_info);
+ aux_tensors.emplace_back(data.tensor, data.tensor_info, data.memory_info);
}
return aux_tensors;
}
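
For reference, a caller-side sketch of how the tuple-returning get_auxiliary_tensors() might be consumed to size and allocate the auxiliary tensors before ClWorkloadRuntime::run(). prepare_aux_tensors is a hypothetical helper and the header paths are indicative rather than confirmed by this patch.

    #include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    #include <tuple>

    using namespace arm_compute;
    using namespace arm_compute::experimental::dynamic_fusion;

    // Hypothetical helper: size and back every auxiliary tensor before running the workload.
    void prepare_aux_tensors(ClWorkloadRuntime &runtime)
    {
        for(auto &aux : runtime.get_auxiliary_tensors())
        {
            CLTensor         *tensor = std::get<0>(aux);
            const TensorInfo &info   = std::get<1>(aux);
            // std::get<2>(aux) is the AuxMemoryInfo (size/lifetime hints), not needed here.

            // The TensorInfo returned since this patch carries the right shape, so the
            // auxiliary tensor can be initialised and allocated with the correct size.
            tensor->allocator()->init(info);
            tensor->allocator()->allocate();
        }
    }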