author     Giorgio Arena <giorgio.arena@arm.com>  2022-03-03 10:09:01 +0000
committer  Giorgio Arena <giorgio.arena@arm.com>  2022-03-08 10:41:03 +0000
commit     232c45253a84c16fc70eae6406cac5f4048efaca (patch)
tree       32aa46ed0cb2bdc9877709d0573f45778894fefe
parent     4cbcb840caca1346de5f2271b67e4ede17b72734 (diff)
download   ComputeLibrary-232c45253a84c16fc70eae6406cac5f4048efaca.tar.gz
Merge kernel prototype patch
Resolves: COMPMID-5151

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: Ic4024d5cd4819fe917a1d49621f1866ae2e90a37
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7260
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  Android.bp | 5
-rw-r--r--  SConscript | 12
-rw-r--r--  SConstruct | 1
-rw-r--r--  arm_compute/core/CL/CLCompileContext.h | 4
-rw-r--r--  arm_compute/runtime/CL/CLScheduler.h | 36
-rw-r--r--  docs/Doxyfile | 4
-rw-r--r--  filelist.json | 9
-rwxr-xr-x  scripts/check_bad_style.sh | 2
-rwxr-xr-x  scripts/clang_tidy_rules.py | 1
-rw-r--r--  src/core/CL/CLCompileContext.cpp | 7
-rw-r--r--  src/core/CL/ICLKernel.h | 25
-rw-r--r--  src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h | 4
-rw-r--r--  src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl | 7
-rw-r--r--  src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h | 4
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp | 136
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h | 255
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h | 668
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h | 77
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp | 92
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h | 71
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp | 341
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h | 74
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h | 35
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp | 76
-rw-r--r--  src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h | 69
-rw-r--r--  src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp | 176
-rw-r--r--  src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h | 79
-rw-r--r--  src/runtime/CL/CLScheduler.cpp | 44
-rw-r--r--  tests/AssetsLibrary.h | 24
-rw-r--r--  tests/SConscript | 3
-rw-r--r--  tests/validation/CL/UNIT/dynamic_fusion/ClCompositeKernel.cpp | 643
31 files changed, 2957 insertions(+), 27 deletions(-)
diff --git a/Android.bp b/Android.bp
index 340aeeed23..c858b07230 100644
--- a/Android.bp
+++ b/Android.bp
@@ -362,6 +362,10 @@ cc_library_static {
"src/core/Utils.cpp",
"src/core/Validate.cpp",
"src/core/Version.cpp",
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp",
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp",
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp",
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp",
"src/core/helpers/SoftmaxHelpers.cpp",
"src/core/helpers/WindowHelpers.cpp",
"src/core/utils/AssemblyUtils.cpp",
@@ -605,6 +609,7 @@ cc_library_static {
"src/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp",
"src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp",
"src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp",
+ "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp",
"src/gpu/cl/kernels/gemm/ClGemmHelpers.cpp",
"src/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp",
"src/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.cpp",
diff --git a/SConscript b/SConscript
index 62ec9f8854..13ef37a7e2 100644
--- a/SConscript
+++ b/SConscript
@@ -172,7 +172,7 @@ def resolve_includes(target, source, env):
for line in tmp_file:
found = pattern.search(line)
if found:
- include_file = found.group(1)
+ include_file = found.group(1).split('/')[-1]
data = files_dict[include_file].file_contents
updated_file.extend(data)
else:
@@ -490,6 +490,16 @@ with (open(Dir('#').path + '/filelist.json')) as fp:
# Common backend files
lib_files = filelist['common']
+# Experimental files
+# Dynamic fusion
+if env['experimental_dynamic_fusion']:
+ if env['embed_kernels']:
+ print("Dynamic fusion with embed_kernels=1 not supported. Skipping.")
+ else:
+ lib_files += filelist['experimental']['dynamic_fusion']
+ arm_compute_env.Append(CPPDEFINES = ['ENABLE_EXPERIMENTAL_DYNAMIC_FUSION'])
+
+
# Logging files
if env["logging"]:
lib_files += filelist['logging']
diff --git a/SConstruct b/SConstruct
index cc5a382c1e..ec669f246e 100644
--- a/SConstruct
+++ b/SConstruct
@@ -114,6 +114,7 @@ vars.AddVariables(
BoolVariable("high_priority", "Generate a library containing only the high priority operators", False),
PathVariable("linker_script", "Use an external linker script", "", PathVariable.PathAccept),
PathVariable("external_tests_dir", "Add examples, benchmarks and tests to the tests suite", "", PathVariable.PathAccept),
+ BoolVariable("experimental_dynamic_fusion", "Build the experimental dynamic fusion files", False),
ListVariable("custom_options", "Custom options that can be used to turn on/off features", "none", ["disable_mmla_fp"]),
ListVariable("data_type_support", "Enable a list of data types to support", "all", ["qasymm8", "qasymm8_signed", "qsymm16", "fp16", "fp32", "integer"]),
ListVariable("data_layout_support", "Enable a list of data layout to support", "all", ["nhwc", "nchw"]),
diff --git a/arm_compute/core/CL/CLCompileContext.h b/arm_compute/core/CL/CLCompileContext.h
index 46a8c9b341..0ce074df2b 100644
--- a/arm_compute/core/CL/CLCompileContext.h
+++ b/arm_compute/core/CL/CLCompileContext.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -77,6 +77,8 @@ public:
*/
const StringSet &options() const;
+ bool operator==(const CLBuildOptions &other) const;
+
private:
StringSet _build_opts; /**< Build options set */
};
diff --git a/arm_compute/runtime/CL/CLScheduler.h b/arm_compute/runtime/CL/CLScheduler.h
index 37d9e2ad05..7ad59782e7 100644
--- a/arm_compute/runtime/CL/CLScheduler.h
+++ b/arm_compute/runtime/CL/CLScheduler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,6 +35,20 @@
#include "arm_compute/runtime/CL/CLTypes.h"
#include "arm_compute/runtime/CL/ICLTuner.h"
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+struct TensorBinding;
+struct ClExecutionDescriptor;
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
namespace arm_compute
{
class ICLKernel;
@@ -87,6 +101,21 @@ public:
*/
void enqueue_op(ICLKernel &kernel, ITensorPack &tensors, bool flush = true);
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+ /** Schedule the execution of the passed kernel if possible.
+ * Use TensorBinding instead of ITensorPack for working with dynamic fusion
+ * @note Does not support dynamic tuning yet
+ *
+ * @param[in] kernel Kernel to execute.
+ * @param[in] tensors Map containing the tensors to operate on.
+ * @param[in] exec_desc Execution descriptor
+ * @param[in] flush (Optional) Specifies if the command queue will be flushed after running the kernel. This will be ignored if job chaining is enabled.
+ */
+ void enqueue_op(ICLKernel &kernel, experimental::dynamic_fusion::TensorBinding &tensors, const experimental::dynamic_fusion::ClExecutionDescriptor &exec_desc, bool flush = true);
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
/** Initialises the context and command queue to be used by the scheduler.
*
* @param[in] context A CL context.
@@ -173,6 +202,11 @@ public:
private:
void enqueue_common(ICLKernel &kernel, ITensorPack &tensors, bool flush);
+
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+ void enqueue_common(ICLKernel &kernel, experimental::dynamic_fusion::TensorBinding &tensors, const experimental::dynamic_fusion::ClExecutionDescriptor &exec_desc, bool flush);
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
/** Flag to ensure symbols initialisation is happening before Scheduler creation */
static std::once_flag _initialize_symbols;
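
The overload added above mirrors the existing ITensorPack-based enqueue_op, but takes the dynamic fusion TensorBinding together with an execution descriptor. A minimal sketch of dispatching a composite kernel through the scheduler, assuming a build with experimental_dynamic_fusion=1 so that ENABLE_EXPERIMENTAL_DYNAMIC_FUSION is defined (the helper function below is illustrative, not part of this patch):

#include "arm_compute/runtime/CL/CLScheduler.h"

namespace df = arm_compute::experimental::dynamic_fusion;

// Enqueue a composite kernel once and flush the command queue afterwards
void run_composite(arm_compute::ICLKernel &kernel, df::TensorBinding &binding, const df::ClExecutionDescriptor &exec_desc)
{
    arm_compute::CLScheduler::get().enqueue_op(kernel, binding, exec_desc, /* flush */ true);
}
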
diff --git a/docs/Doxyfile b/docs/Doxyfile
index 02ed988dce..065aa24458 100644
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -876,6 +876,7 @@ EXCLUDE = ./arm_compute/core/NEON/kernels/assembly/ \
./src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp \
./tests/datasets/ \
./tests/benchmark/fixtures/ \
+ ./tests/validation/CL/UNIT/dynamic_fusion/ClCompositeKernel.cpp \
./tests/validation/fixtures/
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
@@ -2096,7 +2097,8 @@ PREDEFINED = DOXYGEN_SKIP_THIS \
LOCATE_MIN \
LOCATE_MAX \
HAS_BIAS \
- POOL_AVG
+ POOL_AVG \
+ ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
diff --git a/filelist.json b/filelist.json
index 81b28f7f4b..112b1aac7d 100644
--- a/filelist.json
+++ b/filelist.json
@@ -2024,5 +2024,14 @@
}
}
}
+ },
+ "experimental": {
+ "dynamic_fusion": [
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp",
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp",
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp",
+ "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp",
+ "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp"
+ ]
}
}
\ No newline at end of file
diff --git a/scripts/check_bad_style.sh b/scripts/check_bad_style.sh
index 0c0d83ac2f..91849266f4 100755
--- a/scripts/check_bad_style.sh
+++ b/scripts/check_bad_style.sh
@@ -29,7 +29,7 @@ then
exit -1
fi
-grep -HnRE --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=arm_conv "\buint " --exclude-dir=cl_kernels --exclude-dir=cs_shaders $FILES | tee bad_style.log
+grep -HnRE --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=dynamic_fusion --exclude-dir=arm_conv "\buint " --exclude-dir=cl_kernels --exclude-dir=cs_shaders $FILES | tee bad_style.log
if [[ $(cat bad_style.log | wc -l) > 0 ]]
then
echo ""
diff --git a/scripts/clang_tidy_rules.py b/scripts/clang_tidy_rules.py
index d718af0f21..0a0de84bab 100755
--- a/scripts/clang_tidy_rules.py
+++ b/scripts/clang_tidy_rules.py
@@ -15,6 +15,7 @@ def get_list_flags( filename, arch):
flags = ["-std=c++14"]
flags.append("-DARM_COMPUTE_CPP_SCHEDULER=1")
flags.append("-DARM_COMPUTE_CL")
+ flags.append("-DENABLE_EXPERIMENTAL_DYNAMIC_FUSION")
if arch == "aarch64":
flags.append("-DARM_COMPUTE_AARCH64_V8_2")
return flags
diff --git a/src/core/CL/CLCompileContext.cpp b/src/core/CL/CLCompileContext.cpp
index c9dd13e25c..b9b2b5651a 100644
--- a/src/core/CL/CLCompileContext.cpp
+++ b/src/core/CL/CLCompileContext.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -74,6 +74,11 @@ const CLBuildOptions::StringSet &CLBuildOptions::options() const
return _build_opts;
}
+bool CLBuildOptions::operator==(const CLBuildOptions &other) const
+{
+ return _build_opts == other._build_opts;
+}
+
Program::Program()
: _context(), _device(), _is_binary(false), _name(), _source(), _binary()
{
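
The new operator== compares the underlying StringSet, so two CLBuildOptions objects holding the same options compare equal regardless of insertion order. A small illustrative check (not part of this patch):

#include "arm_compute/core/CL/CLCompileContext.h"

bool same_build_options()
{
    arm_compute::CLBuildOptions a, b;
    a.add_option("-DM0=4");
    a.add_option("-DN0=4");
    b.add_option("-DN0=4");
    b.add_option("-DM0=4");
    return a == b; // true: both hold the same set of options
}
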
diff --git a/src/core/CL/ICLKernel.h b/src/core/CL/ICLKernel.h
index bc138e7e3f..4c8028e42a 100644
--- a/src/core/CL/ICLKernel.h
+++ b/src/core/CL/ICLKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,6 +37,20 @@
#include <string>
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+struct TensorBinding;
+struct ClExecutionDescriptor;
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
namespace arm_compute
{
namespace
@@ -63,7 +77,6 @@ template <typename T>
class ICLArray;
class ICLTensor;
class Window;
-
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
@@ -323,6 +336,14 @@ public:
{
ARM_COMPUTE_UNUSED(tensors, window, queue);
}
+
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+ /// The execution is carried out through the run_op method, but run_op needs to be extended to include a ClExecutionDescriptor, since LWS/GWS tuning will now be separated from the IKernel
+ virtual void run_composite_op(experimental::dynamic_fusion::TensorBinding &tensors, const Window &window, cl::CommandQueue &queue, const experimental::dynamic_fusion::ClExecutionDescriptor &exec_desc)
+ {
+ ARM_COMPUTE_UNUSED(tensors, window, queue, exec_desc);
+ }
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
/** Add the passed parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the arguments. Will be incremented by the number of kernel arguments set.
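
Kernels taking part in dynamic fusion override the new run_composite_op hook instead of run_op. A minimal sketch of such an override (hypothetical class; ClCompositeKernel added by this patch is the real implementation):

#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
#include "src/core/CL/ICLKernel.h"

class MyFusedKernel : public arm_compute::ICLKernel
{
public:
    void run_composite_op(arm_compute::experimental::dynamic_fusion::TensorBinding &tensors, const arm_compute::Window &window, cl::CommandQueue &queue,
                          const arm_compute::experimental::dynamic_fusion::ClExecutionDescriptor &exec_desc) override
    {
        // Bind the tensors from `tensors`, then enqueue using exec_desc.gws / exec_desc.suggested_lws
        ARM_COMPUTE_UNUSED(tensors, window, queue, exec_desc);
    }
};
#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
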
diff --git a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h b/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h
index 070c47bd7f..2c2d60ed13 100644
--- a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h
+++ b/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "fp_mixed_precision_helpers.h"
+#include "common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h"
/** (EXPERIMENTAL_POST_OPS) Post Op expansions for the post op sequence:
* act (optional): POST_OP1_ACTIVATION_OPTIONAL
diff --git a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl b/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl
index d8453ed80a..22ae098772 100644
--- a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl
+++ b/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,7 +22,10 @@
* SOFTWARE.
*/
-#include "fp_post_ops_act_eltwise_op_act.h"
+#include "common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h"
+#include "common/experimental/gemm_fused_post_ops/fp_elementwise_op_helpers.h"
+#include "common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h"
+
#include "gemm_helpers.h"
#include "repeat.h"
diff --git a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h b/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h
index e8df121142..e107f4452d 100644
--- a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h
+++ b/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "fp_elementwise_op_helpers.h"
+#include "common/experimental/gemm_fused_post_ops/fp_elementwise_op_helpers.h"
#include "gemm_helpers.h"
#include "load_store_utility.h"
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp
new file mode 100644
index 0000000000..e40f9c6da9
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ClKernelBlueprint::ClKernelBlueprint()
+ : _impl{ std::make_unique<ClKernelBlueprint::Implementation>() }
+{
+}
+
+ClKernelBlueprint::~ClKernelBlueprint() = default;
+
+ClKernelBlueprint::Implementation &ClKernelBlueprint::impl()
+{
+ return *_impl;
+}
+const ClKernelBlueprint::Implementation &ClKernelBlueprint::impl() const
+{
+ return *_impl;
+}
+
+Status add_tensor_argument(ClKernelBlueprint &kernel_blueprint, const ClTensorDescriptor &tensor_desc, ArgumentID &id)
+{
+ id = kernel_blueprint.impl().add_kernel_argument(tensor_desc);
+ return Status{};
+}
+
+Status add_tensor_intermed(ClKernelBlueprint &kernel_blueprint, ArgumentID &id)
+{
+ id = kernel_blueprint.impl().add_intermediate_tensor();
+ return Status{};
+}
+
+Status add_kcomp_gemm_native(ClKernelBlueprint &kernel_blueprint, const ClKernelComponentDescriptor &, const GemmNativeDescriptor &,
+ ArgumentID lhs_id, ArgumentID rhs_id, ArgumentID bias_id, ArgumentID &dst_id)
+{
+ kernel_blueprint.impl().validate_arg_ids({ lhs_id, rhs_id, bias_id, dst_id });
+
+ kernel_blueprint.impl().add_component(
+ std::make_unique<ClGemmNativeKernelComponent>(
+ SharedVarLink{ lhs_id, SharedVarIO::Input, kernel_blueprint.impl().group(lhs_id) },
+ SharedVarLink{ rhs_id, SharedVarIO::Input, kernel_blueprint.impl().group(rhs_id) },
+ SharedVarLink{ dst_id, SharedVarIO::Output, kernel_blueprint.impl().group(dst_id) },
+ SharedVarLink{ bias_id, SharedVarIO::Input, kernel_blueprint.impl().group(bias_id) }));
+
+ return Status{};
+}
+
+Status add_kcomp_eltwise_add(ClKernelBlueprint &kernel_blueprint, const ClKernelComponentDescriptor &, const EltwiseAddDescriptor &,
+ ArgumentID src0_id, ArgumentID src1_id, ArgumentID &dst_id)
+{
+ kernel_blueprint.impl().add_component(
+ std::make_unique<ClElementwiseAddKernelComponent>(
+ SharedVarLink{ src0_id, SharedVarIO::Input, kernel_blueprint.impl().group(src0_id) },
+ SharedVarLink{ src1_id, SharedVarIO::Input, kernel_blueprint.impl().group(src1_id) },
+ SharedVarLink{ dst_id, SharedVarIO::Output, kernel_blueprint.impl().group(dst_id) }));
+
+ return Status{};
+}
+Status add_kcomp_activation(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const ActivationDescriptor &, ArgumentID, ArgumentID &)
+{
+ return Status{};
+}
+Status add_kcomp_store(ClKernelBlueprint &kernel_blueprint, const ClKernelComponentDescriptor &, ArgumentID src_tile, ArgumentID dst_tile, const StoreType &store_type)
+{
+ switch(store_type)
+ {
+ case StoreType::StoreBlockBoundaryAware:
+ kernel_blueprint.impl().add_component(
+ std::make_unique<ClStoreBlockBoundaryAwareKernelComponent>(
+ SharedVarLink{ src_tile, SharedVarIO::Input, kernel_blueprint.impl().group(src_tile) },
+ SharedVarLink{ dst_tile, SharedVarIO::Output, kernel_blueprint.impl().group(dst_tile) }));
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Store mode not yet supported.");
+ }
+
+ return Status{};
+}
+
+Status set_tile_info(ClKernelBlueprint &bp, const TileDescriptor &tile_info)
+{
+ bp.impl().set_tile_info(tile_info);
+ return Status{};
+}
+Status build(ClKernelCode &code, const ClCodeBuilderContext &, ClKernelBlueprint &kernel_blueprint)
+{
+ code.name = kernel_blueprint.impl().build_kernel_name();
+ code.code = kernel_blueprint.impl().build_code();
+
+ code.config_id = kernel_blueprint.impl().build_config_id();
+ code.build_options = kernel_blueprint.impl().build_options();
+ code.window = kernel_blueprint.impl().get_execution_window();
+ code.arguments = kernel_blueprint.impl().get_arguments();
+
+ return Status{};
+}
+Status tune_static(ClExecutionDescriptor &, const ClKernelCode &)
+{
+ return Status{};
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h
new file mode 100644
index 0000000000..15622c848d
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_CLKERNELBUILDINGAPI_H
+#define ARM_COMPUTE_EXPERIMENTAL_CLKERNELBUILDINGAPI_H
+
+#include "arm_compute/core/CL/CLCompileContext.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+using ArgumentID = int32_t;
+
+static constexpr ArgumentID g_arg_placeholder = -1;
+
+/** Verbose and explicit way to enumerate all the tensor argument variants used by
+ * all kernel implementations. This avoids any ambiguity in which kernel arguments are passed
+ */
+enum class TensorArgType : int
+{
+ Scalar,
+
+ Vector,
+
+ Image,
+ Image_Reinterpret_As_3D,
+ Image_Export_To_ClImage2D,
+
+ Image_3D, // 3D Tensor represented as a 2D Image + stride_z
+ Image_3D_Export_To_ClImage2D,
+
+ Tensor_3D,
+ Tensor_4D
+};
+/** Describes all the info required to add a kernel argument at run time */
+struct ClKernelArgRuntimeDescriptor
+{
+ ClKernelArgRuntimeDescriptor(int arg_id, TensorArgType type, bool slide_along_dimz = true)
+ : arg_id{ arg_id }, tensor_arg_type{ type }, slide_along_dimz{ slide_along_dimz }
+ {
+ }
+ ~ClKernelArgRuntimeDescriptor() = default;
+ int arg_id{ g_arg_placeholder }; // Arg ID in the blueprint
+ TensorArgType tensor_arg_type{ TensorArgType::Image };
+ bool slide_along_dimz{ true };
+};
+
+using ClKernelArgList = std::vector<ClKernelArgRuntimeDescriptor>;
+
+/** Intermediate representation of the final, complete kernel source. */
+class ClKernelBlueprint
+{
+public:
+ ClKernelBlueprint();
+ ~ClKernelBlueprint();
+
+private:
+ struct Implementation;
+ std::unique_ptr<Implementation> _impl;
+
+public:
+ Implementation &impl();
+ const Implementation &impl() const;
+};
+
+///// Kernel Components /////
+
+/** Meta information about all Cl Kernel Components */
+struct ClKernelComponentDescriptor
+{
+ int32_t version{ 1 }; /**< Operator version */
+};
+
+/** Component: Tensor Argument */
+struct ClTensorDescriptor
+{
+ ClTensorDescriptor(const ITensorInfo *info, unsigned int dim)
+ : tensor_info(info), slice_dim(dim)
+ {
+ }
+
+ const ITensorInfo *tensor_info;
+ unsigned int slice_dim;
+};
+
+Status add_tensor_argument(ClKernelBlueprint &, const ClTensorDescriptor &, ArgumentID &);
+Status add_tensor_intermed(ClKernelBlueprint &, ArgumentID &);
+
+/** Component: Gemm Native */
+struct GemmNativeDescriptor
+{
+ float alpha{};
+ float beta{};
+ unsigned int m{};
+ unsigned int n{};
+ unsigned int k{};
+ unsigned int depth_output_gemm3d{};
+ bool reinterpret_input_as_3d{};
+ bool broadcast_bias{};
+ bool fp_mixed_precision{};
+ bool has_pad_y{};
+ int nmult_transpose1xW_width{};
+ int mult_interleave4x4_height{};
+ GEMMLHSMatrixInfo lhs_info{};
+ GEMMRHSMatrixInfo rhs_info{};
+ int32_t a_offset{};
+ int32_t b_offset{};
+};
+
+Status add_kcomp_gemm_native(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const GemmNativeDescriptor &, ArgumentID input_id,
+ ArgumentID weights_id, ArgumentID bias_id, ArgumentID &dst_id);
+
+/** Component: Eltwise Add */
+struct EltwiseAddDescriptor
+{
+ ConvertPolicy convert_policy{ ConvertPolicy::SATURATE };
+};
+Status add_kcomp_eltwise_add(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const EltwiseAddDescriptor &, ArgumentID src0_id,
+ ArgumentID src1_id, ArgumentID &dst_id);
+
+/** Component: Activation */
+struct ActivationDescriptor
+{
+};
+Status add_kcomp_activation(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const ActivationDescriptor &, ArgumentID src_id, ArgumentID &dst_id);
+
+enum class ClippingStrategy
+{
+ TOP_LEFT,
+ TOP_RIGHT,
+ BOTTOM_LEFT,
+ BOTTOM_RIGHT,
+};
+
+/** Component: Store */
+struct TileDescriptor
+{
+ Size2D tile_dims{};
+ Size2D boundaries{};
+ ClippingStrategy clipping{ ClippingStrategy::TOP_LEFT };
+
+ TileDescriptor()
+ {
+ }
+
+ TileDescriptor(Size2D dims, const Size2D &bound, const ClippingStrategy &clip)
+ : tile_dims(dims), boundaries(bound), clipping(clip)
+ {
+ }
+
+ bool empty() const
+ {
+ return (tile_dims.area() == 0) || (boundaries.area() == 0);
+ }
+};
+
+enum class StoreType
+{
+ VStore,
+ VStorePartial,
+ StoreRow,
+ ConvertStoreRow,
+ StoreBlock,
+ ConvertStoreBlock,
+ StoreRowPartial,
+ StoreBlockPartial,
+ StoreBlockBoundaryAware,
+ StoreVectorSelect,
+ TStoreIndirectWidthSelect
+};
+
+Status add_kcomp_store(ClKernelBlueprint &, const ClKernelComponentDescriptor &, ArgumentID src_id, ArgumentID dst_id, const StoreType &store_type);
+
+///// Kernel Components /////
+
+///// Building /////
+
+/** Information required for kernel compilation. The build result of a ClKernelBlueprint */
+struct ClKernelCode
+{
+ std::string name{}; /**< Kernel name */
+ std::string code{}; /**< Kernel source code */
+ std::string config_id{}; /**< Generated from blueprint based on complex component */
+ CLBuildOptions build_options{}; /**< Kernel build options */
+ Window window{}; /**< Execution window */
+ ClKernelArgList arguments{}; /**< Kernel argument specifications */
+
+ bool operator==(const ClKernelCode &other) const
+ {
+ return name == other.name && code == other.code && build_options == other.build_options;
+ }
+};
+
+/** GPU information for building the @ref ClKernelCode */
+struct GpuInfo
+{
+ GPUTarget target{ GPUTarget::UNKNOWN };
+};
+
+/** All information required for building the @ref ClKernelCode */
+struct ClCodeBuilderContext
+{
+ GpuInfo gpu_info{};
+};
+
+Status set_tile_info(ClKernelBlueprint &, const TileDescriptor &);
+
+/** Build final kernel source from KernelBlueprint */
+Status build(ClKernelCode &code, const ClCodeBuilderContext &, ClKernelBlueprint &);
+
+///// Building /////
+
+///// Tuning /////
+struct ClExecutionDescriptor
+{
+ cl::NDRange suggested_lws{}; /**< Suggested local work-group size for optimal performance if not zero */
+ cl::NDRange gws{}; /**< Global work-group to be used */
+};
+
+Status tune_static(ClExecutionDescriptor &, const ClKernelCode &);
+
+///// Tuning /////
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif //ARM_COMPUTE_EXPERIMENTAL_CLKERNELBUILDINGAPI_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
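
The header above is the whole public surface of the prototype: tensor arguments and intermediates are registered on a ClKernelBlueprint, components are attached to them, and build() produces a ClKernelCode. A minimal sketch of chaining these calls for a GEMM + elementwise add + store kernel (descriptor values are left at their defaults and the exact sequence is an assumption; the authoritative usage is in tests/validation/CL/UNIT/dynamic_fusion/ClCompositeKernel.cpp added by this patch):

#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

Status build_gemm_add_store(ClKernelCode &code, const ClTensorDescriptor &lhs, const ClTensorDescriptor &rhs,
                            const ClTensorDescriptor &addend, const ClTensorDescriptor &dst)
{
    ClKernelBlueprint bp;
    ArgumentID lhs_id, rhs_id, addend_id, dst_id, acc_id;

    // Register the kernel arguments and one intermediate tile shared between components
    add_tensor_argument(bp, lhs, lhs_id);
    add_tensor_argument(bp, rhs, rhs_id);
    add_tensor_argument(bp, addend, addend_id);
    add_tensor_argument(bp, dst, dst_id);
    add_tensor_intermed(bp, acc_id);

    // Attach the components: GEMM produces the accumulator, the add fuses into it, the store writes it out
    const ClKernelComponentDescriptor comp_desc{};
    add_kcomp_gemm_native(bp, comp_desc, GemmNativeDescriptor{}, lhs_id, rhs_id, g_arg_placeholder, acc_id);
    add_kcomp_eltwise_add(bp, comp_desc, EltwiseAddDescriptor{}, addend_id, acc_id, acc_id);
    add_kcomp_store(bp, comp_desc, acc_id, dst_id, StoreType::StoreBlockBoundaryAware);

    set_tile_info(bp, TileDescriptor{});
    return build(code, ClCodeBuilderContext{}, bp);
}
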
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h
new file mode 100644
index 0000000000..3b5160a055
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMMON_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMMON_H
+
+#include "arm_compute/core/CL/CLCompileContext.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/GPUTarget.h"
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+
+#include <queue>
+#include <stack>
+#include <string>
+#include <unordered_set>
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** We introduce the concept of *Shared Variables* in the context of kernel building.
+ * They are variables that can be accessed / shared among all the kernel components within a single kernel.
+ * For now we consider 2 groups of shared variables:
+ * Argument: The argument variables (parameters) of a kernel
+ * Automatic: The automatic variables declared inside a kernel
+ * All Shared Variables have the same kernel scope, and are thus visible to all kernel components
+*/
+
+enum class SharedVarIO
+{
+ Input,
+ Output
+};
+
+enum class SharedVarGroup
+{
+ Argument, // Parameters to a kernel function
+ Automatic // Automatic variables declared within the kernel body
+};
+
+/** Specifies a shared variable link for a component.
+ * It describes all the information that's available when a component is constructed / added:
+ * e.g. its linkage (via ArgumentID and io) and its group
+ * This is not a shared variable on its own, but is used to instantiate a SharedVar when building the code
+ */
+struct SharedVarLink
+{
+ ArgumentID arg_id{ g_arg_placeholder };
+ SharedVarIO io{ SharedVarIO::Input };
+ SharedVarGroup group{ SharedVarGroup::Argument };
+ bool is_empty() const
+ {
+ return arg_id == g_arg_placeholder;
+ }
+};
+
+/** A table of all the variables used in the kernel / blueprint
+ * NOTE: the order they appear in the table is the order of their "declaration" in the component code, and is also their ID
+ * NOTE: the variables all have the scope of the full kernel function
+ */
+class SharedVarTable
+{
+public:
+ struct SharedVar
+ {
+ SharedVarGroup group;
+ std::string uniq_name; // Unique name, also the final variable name used in the built code
+ ClKernelArgRuntimeDescriptor desc; // Automatic variables can and should still be described using this struct
+ };
+
+ using Arguments = std::vector<SharedVar>;
+
+ /** @note: The order of insertion is important. There is one precondition:
+ * PRECOND: The components have been sorted topologically / are being traversed in topological order
+ * This ensures that all the consumer var links (Output, Automatic links) can consume (return) the producer var links when they are referred to
+ */
+ SharedVar add(SharedVarLink var_link, ClKernelArgRuntimeDescriptor runtime_desc, const std::string &name = "unnamed")
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(var_link.is_empty(), "Non-empty SharedVarLink expected");
+ auto var_id = _num_var;
+ std::stringstream ss;
+ ss << name << "_" << var_id;
+ const auto uniq_name = ss.str();
+ SharedVar var{ var_link.group, uniq_name, runtime_desc };
+
+ if(var_link.group == SharedVarGroup::Argument)
+ {
+ _arguments.emplace(var_id, var);
+ _num_var++;
+ _var_id_lut[var_link.arg_id] = var_id;
+ }
+ else if(var_link.group == SharedVarGroup::Automatic)
+ {
+ if(var_link.io == SharedVarIO::Output)
+ {
+ _global_vars.emplace(var_id, var);
+ _num_var++;
+ _var_id_lut[var_link.arg_id] = var_id;
+ }
+ else
+ {
+ // For the input link, the var (and thus its arg_id) will always have been added by the time we get here if we traverse components in topological order
+ var = get_var(var_link.arg_id);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unrecognised SharedVarGroup");
+ }
+ return var;
+ }
+
+ SharedVar get_var(ArgumentID arg_id) const
+ {
+ const auto var_id = _var_id_lut.at(arg_id); // arg_id has to exist in lut to begin with
+ auto it = _global_vars.find(var_id);
+ if(it != _global_vars.end())
+ {
+ return it->second;
+ }
+ it = _arguments.find(var_id);
+ if(it != _arguments.end())
+ {
+ return it->second;
+ }
+ ARM_COMPUTE_ERROR("Cannot find component variable");
+ }
+
+ /** @note The arguments are returned in the order they are added
+ */
+ Arguments get_kernel_arguments() const
+ {
+ Arguments args{};
+ for(const auto &a : _arguments)
+ {
+ args.push_back(a.second);
+ }
+ return args;
+ }
+
+private:
+ using VarID = int32_t;
+
+private:
+ std::map<VarID, SharedVar> _global_vars{};
+ std::map<VarID, SharedVar> _arguments{};
+ std::unordered_map<ArgumentID, VarID> _var_id_lut{};
+ VarID _num_var{ 0 };
+};
+
+enum class ComponentType
+{
+ Simple,
+ Complex,
+ Store
+};
+
+using ComponentID = int32_t;
+using ComponentList = std::vector<ComponentID>;
+class IClKernelComponent
+{
+public:
+ using Link = SharedVarLink;
+ using Tag = std::string;
+ struct TagVal
+ {
+ TagVal() = default;
+ TagVal(SharedVarTable::SharedVar var)
+ : value{ var.uniq_name }
+ {
+ }
+
+ TagVal(ComponentID id)
+ : value{ std::to_string(id) }
+ {
+ }
+
+ std::string value{};
+ };
+ using TagLUT = std::unordered_map<Tag, TagVal>; // Used to instantiate a code template / replace tags
+public:
+ virtual ~IClKernelComponent() = default;
+ virtual ComponentType get_component_type() const = 0;
+ virtual std::vector<Link> get_links() const = 0;
+ virtual std::string name() const = 0;
+
+ static std::string replace_tags(const std::string &code_template, const TagLUT &tags)
+ {
+ std::string replaced_code = "";
+ std::unordered_set<std::string> used_tags{};
+ bool scanning_pattern = false;
+ std::string pattern_found = "";
+ for(size_t i = 0; i < code_template.size() - 1; ++i)
+ {
+ if(!scanning_pattern)
+ {
+ if(code_template[i] == '{' && code_template[i + 1] == '{')
+ {
+ i += 1;
+ scanning_pattern = true;
+ pattern_found = "";
+ }
+ else
+ {
+ replaced_code += code_template[i];
+ }
+ }
+ else
+ {
+ if(code_template[i] == '}' && code_template[i + 1] == '}')
+ {
+ i += 1;
+ scanning_pattern = false;
+ std::string err = "Pattern " + pattern_found + " not found in tags";
+ ARM_COMPUTE_ERROR_ON_MSG(tags.find(pattern_found) == tags.end(), err.c_str());
+ replaced_code += tags.find(pattern_found)->second.value;
+ used_tags.insert(pattern_found);
+ }
+ else
+ {
+ pattern_found += code_template[i];
+ }
+ }
+ }
+ // Check for unused tags
+ for(const auto &tag : tags)
+ {
+ ARM_COMPUTE_UNUSED(tag);
+ ARM_COMPUTE_ERROR_ON_MSG(used_tags.find(tag.first) == used_tags.end(), "Warning: unused tags");
+ }
+ return replaced_code;
+ }
+ ComponentID id() const
+ {
+ return _id;
+ }
+ void set_id(ComponentID id)
+ {
+ _id = id;
+ }
+
+ virtual std::set<std::string> get_headers_list() const
+ {
+ return std::set<std::string> {};
+ }
+
+ virtual std::string get_additional_macros() const
+ {
+ return "";
+ }
+
+ virtual std::string get_component_code() const
+ {
+ return "";
+ }
+ /** "Allocate" all shared variables used in a component to the @p vtable, and generate a TagLUT used to instantiate the component code
+ *
+ * @param[in,out] vtable Shared variable table in which the component's variables are allocated
+ *
+ * @return TagLUT Tag lookup table used to instantiate the component code
+ */
+ virtual TagLUT allocate_vars(SharedVarTable &vtable) const = 0;
+
+ virtual std::string get_dst_addr_calculation() const
+ {
+ return "";
+ }
+
+private:
+ ComponentID _id{};
+};
+
+using ComponentUniquePtr = std::unique_ptr<IClKernelComponent>;
+
+/** Intermediate representation of the final, complete kernel source.
+ */
+struct ClKernelBlueprint::Implementation
+{
+public:
+ Implementation() = default;
+ ~Implementation() = default;
+
+public:
+ ArgumentID add_kernel_argument(const ClTensorDescriptor &tensor_desc)
+ {
+ _kernel_arguments.insert(std::make_pair(_num_args, tensor_desc));
+ _shared_var_group_lut[_num_args] = SharedVarGroup::Argument;
+ return _num_args++;
+ }
+
+ ArgumentID add_intermediate_tensor()
+ {
+ _intermediate_tensors.insert(_num_args);
+ _shared_var_group_lut[_num_args] = SharedVarGroup::Automatic;
+ return _num_args++;
+ }
+
+ void set_tile_info(const TileDescriptor &tile_info)
+ {
+ _tile_info = tile_info;
+ }
+
+ SharedVarGroup group(ArgumentID arg_id) const
+ {
+ if(arg_id == g_arg_placeholder)
+ {
+ // In case of a placeholder, it does not matter which group we return
+ return SharedVarGroup::Argument;
+ }
+ return _shared_var_group_lut.at(arg_id);
+ }
+
+ void validate_arg_ids(std::initializer_list<ArgumentID> args) const
+ {
+ for(const auto arg_id : args)
+ {
+ ARM_COMPUTE_UNUSED(arg_id);
+ ARM_COMPUTE_ERROR_ON_MSG(_kernel_arguments.find(arg_id) == _kernel_arguments.end() && _intermediate_tensors.find(arg_id) == _intermediate_tensors.end() && arg_id != g_arg_placeholder,
+ "Trying to use an argument that hasn't been added to the blueprint");
+ }
+ }
+
+ void add_component(ComponentUniquePtr component)
+ {
+ if(component->get_component_type() == ComponentType::Complex)
+ {
+ ++_num_complex_components;
+ ARM_COMPUTE_ERROR_ON_MSG(_num_complex_components > 1, "Only one complex component per blueprint is supported.");
+ }
+
+ // This flag specifies if the current component is the root of the component graph
+ // If the root is set to -1, it means that a root hasn't been added yet
+ bool is_graph_root = true;
+
+ // Get an unique ID for the component that's being added
+ const ComponentID component_id = _num_components++;
+ component->set_id(component_id);
+
+ // Add this component to the component graph. Don't connect it to anything yet
+ _component_graph.emplace(component_id, ComponentList{});
+
+ int32_t positional_arg = 0;
+
+ // For every { arg_id, arg_io } passed along with this component...
+ for(const auto &link : component->get_links())
+ {
+ const ArgumentID &arg_id = link.arg_id;
+ const SharedVarIO &arg_io = link.io;
+
+ // A component is considered root only if all its input arguments are kernel arguments or placeholders (i.e. unused arguments)
+ // This performs a check on every argument, and if one of them doesn't respect the condition, the component is not considered root
+ is_graph_root &= (_kernel_arguments.find(arg_id) != _kernel_arguments.end()) || (arg_io == SharedVarIO::Output) || (arg_id == g_arg_placeholder);
+
+ // Add the arg_id to the map describing the input/output relationship between an argument and the components that use it, if it doesn't yet exist there
+ if(_outgoing_components.find(arg_id) == _outgoing_components.end())
+ {
+ _outgoing_components.emplace(arg_id, ComponentList{});
+ _incoming_components.emplace(arg_id, ComponentList{});
+ }
+
+ // If it's an input argument, connect any other component that has it as output with this component
+ // Additionally, set this component as one that treats this argument as "Input" (append to index 0)
+ // This is used so that we keep track of whether two components use the same argument, one as input and one as output
+ if(arg_io == SharedVarIO::Input)
+ {
+ for(const auto &prev_component : _incoming_components[arg_id])
+ {
+ _component_graph[prev_component].push_back(component_id);
+ }
+
+ _outgoing_components[arg_id].push_back(component_id);
+ }
+ // If it's an output argument, connect this component with any other component that has it as input
+ // Additionally, set this component as one that treats this argument as "Output" (append to index 1)
+ else
+ {
+ for(const auto &subseq_component : _outgoing_components[arg_id])
+ {
+ _component_graph[component_id].push_back(subseq_component);
+ }
+
+ _incoming_components[arg_id].push_back(component_id);
+ }
+
+ ++positional_arg;
+ }
+
+ if(is_graph_root)
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(_graph_root >= 0, "Trying to add more than one root to the graph");
+ _graph_root = component_id;
+ }
+
+ // Finally, add this component to the dictionary of components
+ _components.insert(std::make_pair(component_id, std::move(component)));
+ }
+
+ std::string build_kernel_name() const
+ {
+ std::string name = "";
+
+ auto stack = topological_sort();
+ while(!stack.empty())
+ {
+ name += _components.find(stack.top())->second->name() + (stack.size() > 2 ? "___" : "");
+ stack.pop();
+ }
+
+ std::cout << name << std::endl;
+ return name;
+ }
+
+ std::string build_code()
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(_graph_root < 0, "No root found in the component graph");
+
+ // These data structures will hold the data from all the components in the blueprint
+ std::set<std::string> headers_list{};
+ std::set<std::string> additional_macros{};
+ std::vector<std::string> component_codes{}; // vector because order matters
+
+ // Go through the components graph (topological sort) and fill the data structures above
+ auto stack = topological_sort();
+ while(!stack.empty())
+ {
+ auto curr_component_id = stack.top();
+ auto &curr_component = _components.find(curr_component_id)->second;
+
+ auto curr_headers_list = curr_component->get_headers_list();
+ auto curr_additional_macros = curr_component->get_additional_macros();
+ auto curr_component_code = curr_component->get_component_code();
+ const auto var_lut = curr_component->allocate_vars(_vtable); // Ideally can be merged with get_component_code once we have finer-grained code generation technique
+ component_codes.push_back(IClKernelComponent::replace_tags(curr_component_code, var_lut));
+
+ headers_list.insert(curr_headers_list.begin(), curr_headers_list.end());
+ if(!curr_additional_macros.empty()) // Some components might not have any
+ {
+ additional_macros.insert(curr_additional_macros);
+ }
+
+ stack.pop();
+ }
+
+ // This section assembles the data gathered by traversing the graph into the string "code"
+ std::string code = "";
+
+ for(auto &header : headers_list)
+ {
+ code += "#include \"" + header + "\"\n";
+ }
+
+ for(auto &macros : additional_macros)
+ {
+ code += macros;
+ }
+
+ code += generate_kernel_signature(_vtable.get_kernel_arguments());
+
+ code += "\n{\n\n";
+
+ code += " //------------------ START KERNEL_BUILDER_COORDINATE ---------------------\n\n";
+ code += generate_global_section();
+ code += " //------------------ END KERNEL_BUILDER_COORDINATE ---------------------\n";
+
+ for(auto &component_code : component_codes)
+ {
+ code += component_code;
+ }
+
+ code += "}\n";
+
+ return code;
+ }
+
+ std::string build_config_id() const
+ {
+ return "";
+ }
+
+ CLBuildOptions build_options() const
+ {
+ return CLBuildOptions{};
+ }
+
+ Window get_execution_window() const
+ {
+ return Window{};
+ }
+
+ ClKernelArgList get_arguments() const
+ {
+ ClKernelArgList arg_list{};
+ for(const auto &arg_var : _vtable.get_kernel_arguments())
+ {
+ arg_list.push_back(arg_var.desc);
+ }
+ return arg_list;
+ }
+
+private:
+ void topological_sort_utility(ComponentID component_id, std::unordered_set<ComponentID> &visited, std::stack<ComponentID> &stack) const
+ {
+ visited.insert(component_id);
+
+ for(auto connected_component : _component_graph.find(component_id)->second)
+ {
+ if(visited.find(connected_component) == visited.end())
+ {
+ topological_sort_utility(connected_component, visited, stack);
+ }
+ }
+
+ stack.push(component_id);
+ }
+
+ std::stack<ComponentID> topological_sort() const
+ {
+ std::stack<ComponentID> stack{};
+ std::unordered_set<ComponentID> visited{};
+
+ topological_sort_utility(_graph_root, visited, stack);
+
+ return stack;
+ }
+
+ std::string generate_argument_declaration(const SharedVarTable::SharedVar &var) const
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(var.group != SharedVarGroup::Argument, "An argument declaration can only be generated from a kernel argument");
+ std::string code;
+ switch(var.desc.tensor_arg_type)
+ {
+ case TensorArgType::Image:
+ {
+ code += "IMAGE_DECLARATION(" + var.uniq_name + ")";
+ break;
+ }
+ case TensorArgType::Image_3D:
+ {
+ code += "IMAGE_DECLARATION(" + var.uniq_name + "),\n";
+ code += "uint " + var.uniq_name + "_stride_z";
+ break;
+ }
+ case TensorArgType::Image_3D_Export_To_ClImage2D:
+ {
+ code += "__read_only image2d_t " + var.uniq_name + "_img,\n";
+ code += "uint " + var.uniq_name + "_stride_z,\n";
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Unsupported declaration generation for TensorArgType");
+ }
+ }
+ return code;
+ }
+
+ std::string generate_kernel_signature(const SharedVarTable::Arguments &argument_list) const
+ {
+ std::string code = "\n__kernel void " + build_kernel_name() + "(";
+
+ for(const auto &arg : argument_list)
+ {
+ code += "\n " + generate_argument_declaration(arg) + ",";
+ }
+
+ code[code.length() - 1] = ')';
+
+ return code;
+ }
+
+ std::string generate_global_section() const
+ {
+ std::string code = " uint g_x = get_global_id(0);\n";
+ code += " uint g_y = get_global_id(1);\n";
+ code += " uint g_z = get_global_id(2);\n\n";
+
+ size_t tile_dim_x = _tile_info.empty() ? 1 : _tile_info.tile_dims.x();
+ size_t tile_dim_y = _tile_info.empty() ? 1 : _tile_info.tile_dims.y();
+
+ switch(_tile_info.clipping)
+ {
+ case ClippingStrategy::TOP_LEFT:
+ code += " const bool g_cond_x = (g_x == 0);\n";
+ code += " const bool g_cond_y = (g_y == 0);\n";
+ break;
+ case ClippingStrategy::TOP_RIGHT:
+ code += " const bool g_cond_x = ((g_x + 1) * " + std::to_string(tile_dim_x) + " >= " + std::to_string(_tile_info.boundaries.x()) + ");\n";
+ code += " const bool g_cond_y = (g_y == 0);\n";
+ break;
+ case ClippingStrategy::BOTTOM_LEFT:
+ code += " const bool g_cond_x = (g_x == 0);\n";
+ code += " const bool g_cond_y = ((g_y + 1) * " + std::to_string(tile_dim_y) + " >= " + std::to_string(_tile_info.boundaries.y()) + ");\n";
+ break;
+ case ClippingStrategy::BOTTOM_RIGHT:
+ code += " const bool g_cond_x = ((g_x + 1) * " + std::to_string(tile_dim_x) + " >= " + std::to_string(_tile_info.boundaries.x()) + ");\n";
+ code += " const bool g_cond_y = ((g_y + 1) * " + std::to_string(tile_dim_y) + " >= " + std::to_string(_tile_info.boundaries.y()) + ");\n";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported clipping strategy");
+ }
+
+ code += "\n REPEAT_VAR_INIT_TO_CONST(M0, uint, g_zout, 0);\n";
+ code += " REPEAT_VAR_INIT_TO_CONST(16, uint, g_zero, 0);\n\n";
+
+ return code;
+ }
+
+ TileDescriptor _tile_info{};
+
+ int32_t _num_args{};
+ int32_t _num_components{};
+ int32_t _num_complex_components{};
+
+ // Argument, component and intermediate tensor IDs with their corresponding objects (intermediates are IDs only)
+ std::unordered_map<ComponentID, ComponentUniquePtr> _components{};
+ std::unordered_map<ArgumentID, ClTensorDescriptor> _kernel_arguments{};
+ std::unordered_set<ArgumentID> _intermediate_tensors{};
+ // Argument group lookup. Can be replaced by extending the ArgumentID type to include group info
+ std::unordered_map<ArgumentID, SharedVarGroup> _shared_var_group_lut{};
+
+ // Tracks all variables (e.g.: kernel arguments, kernel "global variables")
+ SharedVarTable _vtable{};
+
+ // Component directed graph (represented by an adjacency list of Component IDs)
+ // This is used to understand the ordering and bindings between components when generating the kernel
+ // The root is initially set to -1, meaning the graph has no root yet, since valid component IDs are non-negative
+ ComponentID _graph_root{ -1 };
+ std::unordered_map<ComponentID, ComponentList> _component_graph{};
+
+ // Additional data structures used to define the relationships between components and arguments
+ // For each argument, it contains the list of components that consider it as an incoming or an outgoing argument
+ // E.g. tensor0 -> component0 -> tensor1
+ // _outgoing_components[tensor0] == {component0} (component0 is the outgoing component of tensor0. Component0 treats tensor0 as an input tensor)
+ // _incoming_components[tensor1] == {component0} (component0 is the incoming component of tensor1. Component1 treats tensor1 as an output tensor)
+ std::unordered_map<ArgumentID, ComponentList> _outgoing_components{};
+ std::unordered_map<ArgumentID, ComponentList> _incoming_components{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMMON_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
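
Component code is written as a template with {{tag}} markers that IClKernelComponent::replace_tags() substitutes from a TagLUT, as the components below do for their GEMM and ELTWISE_ADD sections. A small illustrative instantiation (the template string and tag values are made up for the example):

#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"

std::string instantiate_example()
{
    using namespace arm_compute::experimental::dynamic_fusion;
    const std::string code_template = "// START KERNEL {{meta_kernel_id}}\n{{dst}} = {{acc}};\n";
    IClKernelComponent::TagLUT tags{};
    tags["meta_kernel_id"] = IClKernelComponent::TagVal(ComponentID{ 2 }); // TagVal can also be built from a SharedVarTable::SharedVar
    tags["dst"].value      = "dst_3";
    tags["acc"].value      = "add_acc_1";
    return IClKernelComponent::replace_tags(code_template, tags); // "// START KERNEL 2\ndst_3 = add_acc_1;"
}
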
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h
new file mode 100644
index 0000000000..41ab4e320b
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_UTILS
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_UTILS
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+inline ::std::ostream &operator<<(::std::ostream &os, const CLBuildOptions::StringSet &build_opts)
+{
+ for(const auto &opt : build_opts)
+ {
+ os << opt << ",";
+ }
+ return os;
+}
+inline ::std::ostream &operator<<(::std::ostream &os, const CLBuildOptions &cl_build_opts)
+{
+ os << cl_build_opts.options();
+ return os;
+}
+
+inline std::string to_string(const CLBuildOptions &cl_build_opts)
+{
+ std::stringstream str;
+ str << cl_build_opts;
+ return str.str();
+}
+inline ::std::ostream &operator<<(::std::ostream &os, const ClKernelCode &code)
+{
+ os << "name: " << code.name << std::endl;
+ os << "code: " << code.code << std::endl;
+ os << "build_opts: " << code.build_options << std::endl;
+ return os;
+}
+inline std::string to_string(const ClKernelCode &code)
+{
+ std::stringstream str;
+ str << code;
+ return str.str();
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_UTILS
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
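
These stream helpers make it easy to inspect a built kernel while developing. A tiny illustrative use (not part of this patch):

#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h"

#include <iostream>

// Print the kernel name, generated source and build options of a built ClKernelCode
void dump_built_kernel(const arm_compute::experimental::dynamic_fusion::ClKernelCode &code)
{
    std::cout << arm_compute::experimental::dynamic_fusion::to_string(code) << std::endl;
}
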
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp
new file mode 100644
index 0000000000..a44b5faee2
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ComponentType ClElementwiseAddKernelComponent::get_component_type() const
+{
+ return ComponentType::Simple;
+}
+
+std::set<std::string> ClElementwiseAddKernelComponent::get_headers_list() const
+{
+ return std::set<std::string> { "gemm_helpers.h", "repeat.h" };
+}
+
+std::string ClElementwiseAddKernelComponent::get_component_code() const
+{
+ std::string code;
+ return R"_(
+ //------------------ START KERNEL {{meta_kernel_id}} ELTWISE_ADD ---------------------
+ // IN_0(Accumulator) {{acc}}
+ // IN_1(Addend) {{addend}}
+
+ // c = addend + c (mix-precision, broadcast, boundary aware)
+ {
+ __global uchar *addend_addr = {{addend}}_ptr + {{addend}}_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0) * {{addend}}_stride_y) + get_global_id(2) * {{addend}}_stride_z; \
+ LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, addend, addend_addr, 0, {{addend}}_stride_y, g_zero, PARTIAL_LOAD_M0, PARTIAL_LOAD_N0, PARTIAL_COND_Y, PARTIAL_COND_X); \
+ MIXED_PRECISION_ELTWISE_OP_BLOCK(ADD_X_POS_0, M0, N0, {{acc}}, addend, DATA_TYPE_ACCUMULATOR, addend_hp);
+ }
+ //------------------ END KERNEL {{meta_kernel_id}} ELTWISE_ADD ---------------------
+
+)_";
+}
+ClElementwiseAddKernelComponent::TagLUT ClElementwiseAddKernelComponent::allocate_vars(SharedVarTable &vtable) const
+{
+ // Determine which argument is the accumulator
+ Link accumulator;
+ Link addend;
+ if(_lhs.group == SharedVarGroup::Automatic)
+ {
+ accumulator = _lhs;
+ addend = _rhs;
+ }
+ else if(_rhs.group == SharedVarGroup::Automatic)
+ {
+ accumulator = _rhs;
+ addend = _lhs;
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Invalid elementwise component linking");
+ }
+ return {
+ { "meta_kernel_id", id() },
+ { "acc", vtable.add(accumulator, ClKernelArgRuntimeDescriptor(accumulator.arg_id, TensorArgType::Image_3D), "add_acc") },
+ { "addend", vtable.add(addend, ClKernelArgRuntimeDescriptor(addend.arg_id, TensorArgType::Image_3D), "add_addend") },
+ // {"dst", vtable.add(_dst, ClKernelArgRuntimeDescriptor(_dst.arg_id, TensorArgType::Image_3D), "dst")}, // dst is needed for the root version and/or non-inplace version should we need one
+ };
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
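For context, the {{tag}} placeholders in get_component_code() are resolved against the TagLUT returned by allocate_vars(): each tag (e.g. "acc", "addend", "meta_kernel_id") maps to the shared variable name registered in the SharedVarTable. The actual substitution lives in the kernel-building implementation (Common.h); the sketch below only illustrates the idea and is not the code used by the patch:

    // Illustrative tag substitution pass: replace every "{{tag}}" token with its value.
    #include <map>
    #include <string>

    std::string substitute_tags(std::string code, const std::map<std::string, std::string> &lut)
    {
        for(const auto &entry : lut)
        {
            const std::string token = "{{" + entry.first + "}}";
            for(std::size_t pos = code.find(token); pos != std::string::npos; pos = code.find(token, pos))
            {
                code.replace(pos, token.size(), entry.second);
                pos += entry.second.size();
            }
        }
        return code;
    }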
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h
new file mode 100644
index 0000000000..c0de4ac9b8
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLELEMENTWISEADDKERNELCOMPONENT_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLELEMENTWISEADDKERNELCOMPONENT_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class ClElementwiseAddKernelComponent : public IClKernelComponent
+{
+public:
+ ClElementwiseAddKernelComponent(const Link &lhs, const Link &rhs, const Link &dst)
+ : _lhs{ lhs }, _rhs{ rhs }, _dst{ dst }
+ {
+ }
+ ComponentType get_component_type() const override;
+ std::set<std::string> get_headers_list() const override;
+ std::string get_component_code() const override;
+
+ virtual std::vector<Link> get_links() const override
+ {
+ return { _lhs, _rhs, _dst };
+ }
+
+ virtual TagLUT allocate_vars(SharedVarTable &vtable) const override;
+
+ virtual std::string name() const override
+ {
+ return "eltwise_add_" + std::to_string(id());
+ }
+
+private:
+ Link _lhs{};
+ Link _rhs{};
+ Link _dst{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLELEMENTWISEADDKERNELCOMPONENT_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp
new file mode 100644
index 0000000000..1521973d55
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ComponentType ClGemmNativeKernelComponent::get_component_type() const
+{
+ return ComponentType::Complex;
+}
+
+std::set<std::string> ClGemmNativeKernelComponent::get_headers_list() const
+{
+ return std::set<std::string> { "./common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h", "gemm_helpers.h", "repeat.h" };
+}
+
+std::string ClGemmNativeKernelComponent::get_additional_macros() const
+{
+ return R"_(
+#define VFMA(a, b, c) \
+({ \
+ c = fma(a, b, c); \
+})
+
+#if M0 == 1
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ })
+#elif M0 == 2 // M0 == 2
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+ })
+#elif M0 == 3 // M0 == 3
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+ })
+#elif M0 == 4 // M0 == 4
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+ })
+#elif M0 == 5 // M0 == 5
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+ })
+#elif M0 == 6 // M0 == 6
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
+ })
+#elif M0 == 7 // M0 == 7
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
+ })
+#elif M0 == 8 // M0 == 8
+#define RHS_VFMA_M0xN0(i, a, b, c) \
+ ({ \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
+ VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##7).s##i), b, (c##7)); \
+ })
+#else // M0 not supported
+#error "M0 not supported"
+#endif // M0 not supported
+)_";
+}
+
+std::string ClGemmNativeKernelComponent::get_component_code() const
+{
+ std::string code = R"_(
+ //------------------ START KERNEL {{meta_kernel_id}} ---------------------
+ // IN_0(lhs) {{lhs}}
+ // IN_1(rhs) {{rhs}}
+ )_";
+
+ if(!_bias.is_empty())
+ {
+ code += R"_(
+ // IN_2(bias) {{bias}}
+ )_";
+ }
+
+ code += R"_(
+ // OUT(dst, accum) {{dst}}
+
+ // Initialize the accumulators
+ REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), {{dst}}, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
+ {
+#if defined(DUMMY_WORK_ITEMS)
+ if((g_x * N0 >= N) || (g_y * M0 >= M))
+ {
+ return;
+ }
+#endif // defined(DUMMY_WORK_ITEMS)
+
+ // Compute LHS matrix address
+ uint lhs_offset = {{lhs}}_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0) * (uint){{lhs}}_stride_y;
+
+ // Compute RHS matrix address
+ uint rhs_offset = {{rhs}}_offset_first_element_in_bytes + g_x * N0 * sizeof(DATA_TYPE);
+
+#if defined(MATRIX_B_DEPTH)
+ // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
+ rhs_offset += (g_z % MATRIX_B_DEPTH) * {{rhs}}_stride_z;
+#else // defined(MATRIX_B_DEPTH)
+ rhs_offset += g_z * {{rhs}}_stride_z;
+#endif // defined(MATRIX_B_DEPTH)
+
+ REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0);
+
+#if defined(REINTERPRET_INPUT_AS_3D)
+ // The plane (zlhs) is calculated dividing M (g_y * M0) by HEIGHT_GEMM3D
+ CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, {{lhs}}_cross_plane_pad, {{lhs}}_stride_y);
+
+ // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
+ // multiply lhs_stride_z by DEPTH_GEMM3D
+ lhs_offset += g_z * {{lhs}}_stride_z * DEPTH_GEMM3D;
+
+#else // defined(REINTERPRET_INPUT_AS_3D)
+
+ // Add offset for batched GEMM
+ lhs_offset += g_z * {{lhs}}_stride_z;
+
+#endif // defined(REINTERPRET_INPUT_AS_3D)
+
+ int i = 0;
+#if K0 > 1
+ for(; i <= (K - K0); i += K0)
+ {
+ // Supported cases (M0, K0):
+ // 1,2 - 1,3 - 1,4 - 1,8 - 1,16
+ // 2,2 - 2,3 - 2,4 - 2,8 - 2,16
+ // 3,2 - 3,3 - 3,4 - 3,8 - 3,16
+ // 4,2 - 4,3 - 4,4 - 4,8 - 4,16
+ // 5,2 - 5,3 - 5,4 - 5,8 - 5,16
+ // 6,2 - 6,3 - 6,4 - 6,8 - 6,16
+ // 7,2 - 7,3 - 7,4 - 7,8 - 7,16
+ // 8,2 - 8,3 - 8,4 - 8,8 - 8,16
+ // Load values from LHS matrix
+ LOAD_BLOCK(M0, K0, DATA_TYPE, a, {{lhs}}_ptr, lhs_offset, {{lhs}}_stride_y, zlhs);
+
+ // Load values from RHS matrix
+ LOAD_BLOCK(K0, N0, DATA_TYPE, b, {{rhs}}_ptr, rhs_offset, {{rhs}}_stride_y, g_zero);
+
+ RHS_VFMA_M0xN0(0, a, b0, {{dst}});
+ RHS_VFMA_M0xN0(1, a, b1, {{dst}});
+#if K0 > 2
+ RHS_VFMA_M0xN0(2, a, b2, {{dst}});
+#endif // K0 > 2
+#if K0 > 3
+ RHS_VFMA_M0xN0(3, a, b3, {{dst}});
+#endif // K0 > 3
+#if K0 > 4
+ RHS_VFMA_M0xN0(4, a, b4, {{dst}});
+ RHS_VFMA_M0xN0(5, a, b5, {{dst}});
+ RHS_VFMA_M0xN0(6, a, b6, {{dst}});
+ RHS_VFMA_M0xN0(7, a, b7, {{dst}});
+#endif // K0 > 4
+#if K0 > 8
+ RHS_VFMA_M0xN0(8, a, b8, {{dst}});
+ RHS_VFMA_M0xN0(9, a, b9, {{dst}});
+ RHS_VFMA_M0xN0(A, a, bA, {{dst}});
+ RHS_VFMA_M0xN0(B, a, bB, {{dst}});
+ RHS_VFMA_M0xN0(C, a, bC, {{dst}});
+ RHS_VFMA_M0xN0(D, a, bD, {{dst}});
+ RHS_VFMA_M0xN0(E, a, bE, {{dst}});
+ RHS_VFMA_M0xN0(F, a, bF, {{dst}});
+#endif // K0 > 8
+
+ lhs_offset += K0 * sizeof(DATA_TYPE);
+ rhs_offset += K0 * {{rhs}}_stride_y;
+ }
+#endif // K0 > 1
+ // Left-over accumulations
+ for(; i < K; ++i)
+ {
+ // Load values from LHS matrix
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a0 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 0 * {{lhs}}_stride_y + zlhs0));
+#if M0 > 1
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a1 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 1 * {{lhs}}_stride_y + zlhs1));
+#endif // M0 > 1
+#if M0 > 2
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a2 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 2 * {{lhs}}_stride_y + zlhs2));
+#endif // M0 > 2
+#if M0 > 3
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a3 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 3 * {{lhs}}_stride_y + zlhs3));
+#endif // M0 > 3
+#if M0 > 4
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a4 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 4 * {{lhs}}_stride_y + zlhs4));
+#endif // M0 > 4
+#if M0 > 5
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a5 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 5 * {{lhs}}_stride_y + zlhs5));
+#endif // M0 > 5
+#if M0 > 6
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a6 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 6 * {{lhs}}_stride_y + zlhs6));
+#endif // M0 > 6
+#if M0 > 7
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a7 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 7 * {{lhs}}_stride_y + zlhs7));
+#endif // M0 > 7
+
+ VEC_DATA_TYPE(DATA_TYPE, N0)
+ b = VLOAD(N0)(0, (__global DATA_TYPE *)({{rhs}}_ptr + rhs_offset + 0 * {{rhs}}_stride_y));
+ RHS_VFMA_M0xN0(0, a, b, {{dst}});
+
+ lhs_offset += sizeof(DATA_TYPE);
+ rhs_offset += {{rhs}}_stride_y;
+ }
+
+ // Multiply by the weight of matrix-matrix product and store the result
+#if defined(ALPHA)
+ SCALE_BLOCK(M0, DATA_TYPE, {{dst}}, ALPHA);
+#endif // defined(ALPHA)
+ )_";
+
+ if(!_bias.is_empty())
+ {
+ code += R"_(
+ // Add beta*bias
+#if defined(BROADCAST_BIAS)
+ __global uchar *bias_addr = {{bias}}_ptr + {{bias}}_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
+
+ LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, {{bias}}_stride_y, g_zero);
+
+#ifndef UNIT_BETA
+ SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
+#endif // UNIT_BETA
+
+ // c = c + bias[broadcasted]
+ ADD_BLOCK_BROADCAST(M0, {{dst}}, bias0);
+
+#else // defined(BROADCAST_BIAS)
+ __global uchar *bias_addr = {{bias}}_ptr + {{bias}}_offset_first_element_in_bytes + (g_x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(g_y, M0,
+ PARTIAL_STORE_M0)
+ * {{bias}}_stride_y)
+ + g_z * {{bias}}_stride_z;
+
+ LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, {{bias}}_stride_y, g_zero);
+
+#ifndef UNIT_BETA
+ SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
+#endif // UNIT_BETA
+
+ // c = c + bias
+ ADD_BLOCK(M0, {{dst}}, bias);
+
+#endif // defined(BROADCAST_BIAS)
+ )_";
+ }
+
+ code += R"_(
+ }
+ //------------------ END KERNEL {{meta_kernel_id}} ---------------------
+ )_";
+ return code;
+}
+
+ClGemmNativeKernelComponent::TagLUT ClGemmNativeKernelComponent::allocate_vars(SharedVarTable &vtable) const
+{
+ TagLUT lut{};
+
+ lut["meta_kernel_id"] = id();
+ lut["lhs"] = vtable.add(_lhs, ClKernelArgRuntimeDescriptor(_lhs.arg_id, TensorArgType::Image_3D), "lhs");
+ lut["rhs"] = vtable.add(_rhs, ClKernelArgRuntimeDescriptor(_rhs.arg_id, TensorArgType::Image_3D), "rhs");
+ if(!_bias.is_empty()) // optional bias
+ {
+ lut["bias"] = vtable.add(_bias, ClKernelArgRuntimeDescriptor(_bias.arg_id, TensorArgType::Image_3D), "bias");
+ }
+ lut["dst"] = vtable.add(_dst, ClKernelArgRuntimeDescriptor(_dst.arg_id, TensorArgType::Image_3D), "dst");
+ return lut;
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
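A quick worked expansion of the helper macros above may help readers: with M0 == 2, N0 == 4 and DATA_TYPE == float, the call RHS_VFMA_M0xN0(0, a, b0, c) expands, per the VFMA definition, to one fused multiply-add per accumulator row, broadcasting lane 0 of each LHS row across the RHS vector:

    // Expansion for M0 == 2, N0 == 4, DATA_TYPE == float (illustrative):
    c0 = fma((float4)((a0).s0), b0, c0);
    c1 = fma((float4)((a1).s0), b0, c1);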
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h
new file mode 100644
index 0000000000..38f007c07c
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLGEMMNATIVEKERNELCOMPONENT_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLGEMMNATIVEKERNELCOMPONENT_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class ClGemmNativeKernelComponent : public IClKernelComponent
+{
+public:
+ ClGemmNativeKernelComponent(const Link &lhs, const Link &rhs, const Link &dst, const Link &bias = Link{})
+ : _lhs{ lhs }, _rhs{ rhs }, _bias{ bias }, _dst{ dst }
+ {
+ }
+ ComponentType get_component_type() const override;
+ std::set<std::string> get_headers_list() const override;
+ std::string get_additional_macros() const override;
+ std::string get_component_code() const override;
+ ClKernelArgList get_args();
+
+ virtual std::vector<Link> get_links() const override
+ {
+ return { _lhs, _rhs, _bias, _dst };
+ }
+
+ virtual TagLUT allocate_vars(SharedVarTable &vtable) const override;
+
+ virtual std::string name() const override
+ {
+ return "gemm_mm_native_" + std::to_string(id());
+ }
+
+private:
+ Link _lhs{};
+ Link _rhs{};
+ Link _bias{};
+ Link _dst{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLGEMMNATIVEKERNELCOMPONENT_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h
new file mode 100644
index 0000000000..b751ce237f
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_CLKERNELCOMPONENTS_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_CLKERNELCOMPONENTS_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h"
+
+#endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_CLKERNELCOMPONENTS_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp
new file mode 100644
index 0000000000..430fafb89f
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ComponentType ClStoreBlockBoundaryAwareKernelComponent::get_component_type() const
+{
+ return ComponentType::Store;
+}
+
+std::string ClStoreBlockBoundaryAwareKernelComponent::get_component_code() const
+{
+ return R"_(
+ //------------------ START KERNEL {{meta_kernel_id}} STORE ---------------------
+
+ __global uchar *dst_addr = {{dst}}_ptr + {{dst}}_offset_first_element_in_bytes + (g_x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0) * {{dst}}_stride_y);
+
+#if defined(REINTERPRET_OUTPUT_AS_3D)
+ // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
+ // multiply dst_stride_z by DEPTH_GEMM3D
+ dst_addr += g_z * {{dst}}_stride_z * DEPTH_GEMM3D;
+
+#else // defined(REINTERPRET_OUTPUT_AS_3D)
+
+ // Add offset for batched GEMM
+ dst_addr += g_z * {{dst}}_stride_z;
+
+#endif // defined(REINTERPRET_OUTPUT_AS_3D)
+
+ STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, {{src}}, dst_addr, {{dst}}_stride_y, g_zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, g_cond_y, g_cond_x);
+
+ //------------------ END KERNEL {{meta_kernel_id}} STORE ---------------------
+
+)_";
+}
+ClStoreBlockBoundaryAwareKernelComponent::TagLUT ClStoreBlockBoundaryAwareKernelComponent::allocate_vars(SharedVarTable &vtable) const
+{
+ return {
+ { "meta_kernel_id", id() },
+ { "src", vtable.add(_src, ClKernelArgRuntimeDescriptor(_src.arg_id, TensorArgType::Image_3D), "src") },
+ { "dst", vtable.add(_dst, ClKernelArgRuntimeDescriptor(_dst.arg_id, TensorArgType::Image_3D), "dst") },
+ };
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h
new file mode 100644
index 0000000000..f0d01d30a9
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLSTOREKERNELCOMPONENTS_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLSTOREKERNELCOMPONENTS_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class ClStoreBlockBoundaryAwareKernelComponent : public IClKernelComponent
+{
+public:
+ ClStoreBlockBoundaryAwareKernelComponent(const Link &src, const Link &dst)
+ : _src{ src }, _dst{ dst }
+ {
+ }
+ ComponentType get_component_type() const override;
+ std::string get_component_code() const override;
+
+ virtual std::vector<Link> get_links() const override
+ {
+ return { _src, _dst };
+ }
+
+ virtual TagLUT allocate_vars(SharedVarTable &vtable) const override;
+
+ virtual std::string name() const override
+ {
+ return "";
+ }
+
+private:
+ Link _src{};
+ Link _dst{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLSTOREKERNELCOMPONENTS_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp
new file mode 100644
index 0000000000..05912dfd81
--- /dev/null
+++ b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "src/core/CL/CLUtils.h"
+#include "src/gpu/cl/ClKernelLibrary.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+using namespace arm_compute::opencl;
+
+void ClCompositeKernel::configure(const ClCompileContext &compile_ctx, const ClKernelCode &cl_code)
+{
+ // Create kernel from kernel source string
+ opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
+ _kernel = static_cast<cl::Kernel>(compile_ctx.create_kernel(cl_code.name,
+ "" /* Program name: Used as part of a unique string for built kernel cache. Not needed */,
+ cl_code.code,
+ klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */,
+ cl_code.build_options.options(),
+ false /* Is source binary */));
+
+ // Configure execution window
+ IClKernel::configure_internal(cl_code.window);
+
+ // Set config id for lws tuning
+ _config_id = cl_code.config_id;
+
+ // Set kernel arguments
+ _arguments = cl_code.arguments;
+}
+
+inline void ClCompositeKernel::add_tensor_argument(unsigned int &idx, const ClKernelArgRuntimeDescriptor &arg, ICLTensor *tensor, const Window &arg_slice)
+{
+ switch(arg.tensor_arg_type)
+ {
+ case TensorArgType::Scalar:
+ {
+ ARM_COMPUTE_ERROR("Unsupported yet");
+ break;
+ }
+
+ case TensorArgType::Vector:
+ {
+ add_1D_tensor_argument(idx, tensor, arg_slice);
+ break;
+ }
+
+ case TensorArgType::Image:
+ {
+ add_2D_tensor_argument(idx, tensor, arg_slice);
+ break;
+ }
+ case TensorArgType::Image_Reinterpret_As_3D:
+ {
+ add_2D_tensor_argument(idx, tensor, arg_slice);
+ const unsigned int total_cross_plane_pad = tensor->info()->padding().top + tensor->info()->padding().bottom;
+ _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad));
+ break;
+ }
+ case TensorArgType::Image_Export_To_ClImage2D:
+ {
+ const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3));
+ const size_t image_row_pitch = tensor->info()->strides_in_bytes()[1];
+ cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d, tensor->info()->data_type(), image_row_pitch);
+ _kernel.setArg(idx++, tensor_image2d);
+ break;
+ }
+
+ case TensorArgType::Image_3D:
+ {
+ add_2D_tensor_argument(idx, tensor, arg_slice);
+ _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(tensor->info()->strides_in_bytes()[2]));
+ break;
+ }
+ case TensorArgType::Image_3D_Export_To_ClImage2D:
+ {
+ const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3));
+ const size_t image_row_pitch = tensor->info()->strides_in_bytes()[1];
+ cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d, tensor->info()->data_type(), image_row_pitch);
+ _kernel.setArg(idx++, tensor_image2d);
+ _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(tensor->info()->strides_in_bytes()[2]));
+ break;
+ }
+
+ case TensorArgType::Tensor_3D:
+ {
+ add_3D_tensor_argument(idx, tensor, arg_slice);
+ break;
+ }
+
+ case TensorArgType::Tensor_4D:
+ {
+ add_4D_tensor_argument(idx, tensor, arg_slice);
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Unsupported");
+ }
+ }
+}
+
+void ClCompositeKernel::run_composite_op(TensorBinding &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc)
+{
+ ARM_COMPUTE_UNUSED(exec_desc);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ Window slice = window.first_slice_window_3D();
+ // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
+ // This scenario can happen when the matrix multiplication is used to perform a convolution operation
+ Window slice_fixed_z = slice;
+ slice_fixed_z.set(Window::DimX, Window::Dimension(0, 1, 1));
+ slice_fixed_z.set(Window::DimY, Window::Dimension(0, 1, 1));
+ unsigned int idx = 0;
+ do
+ {
+ // Set kernel arguments
+ Window arg_slice = slice;
+ for(auto arg : _arguments)
+ {
+ auto tensor = tensors._binding.at(arg.arg_id);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+ if(!arg.slide_along_dimz)
+ {
+ // The stride_z for matrix must be zero if we do not slice
+ ARM_COMPUTE_ERROR_ON(tensor->info()->strides_in_bytes()[3] != 0);
+ arg_slice = slice_fixed_z;
+ }
+ add_tensor_argument(idx, arg, tensor, arg_slice);
+ }
+
+ // Dispatch kernel
+ bool use_dummy_work_items = false;
+ enqueue(queue, *this, slice, lws_hint(), use_dummy_work_items);
+ }
+ while(window.slide_window_slice_3D(slice));
+}
+
+Status bind_arguments(ITensorPack &, const ClKernelCode &, const TensorBinding &)
+{
+ return Status{};
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
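Putting the pieces together, the composite kernel is driven in two phases: configure() compiles the generated source once, then run_composite_op() binds the run-time tensors and enqueues the window slices. A condensed usage sketch, mirroring the unit test further down (blueprint, window and tensor setup elided):

    // Minimal driving sequence for a built ClKernelCode; tensor allocation and
    // window configuration are omitted here.
    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h"

    using namespace arm_compute;
    using namespace arm_compute::experimental::dynamic_fusion;

    void run_fused_kernel(const ClKernelCode &cl_code, TensorBinding &tensors, const ClExecutionDescriptor &exec_desc)
    {
        ClCompositeKernel kernel;
        kernel.configure(CLKernelLibrary::get().get_compile_context(), cl_code); // compile the fused kernel once
        CLScheduler::get().enqueue_op(kernel, tensors, exec_desc, true);          // bind tensors and dispatch
    }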
diff --git a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h
new file mode 100644
index 0000000000..29d30003c3
--- /dev/null
+++ b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+struct TensorBinding
+{
+ TensorBinding(const std::map<ArgumentID, ICLTensor *> binding)
+ : _binding{ binding }
+ {
+ }
+ std::map<ArgumentID, ICLTensor *> _binding;
+};
+class ClCompositeKernel : public opencl::IClKernel
+{
+public:
+ void configure(const opencl::ClCompileContext &, const ClKernelCode &);
+
+ /** Run the composite kernel
+ *
+ * @param tensors TensorBinding object containing run-time tensors information
+ * @param window Execution window
+ * @param queue OpenCL Command queue
+ * @param exec_desc Descriptor containing execution information
+ */
+ virtual void run_composite_op(TensorBinding &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc) override;
+
+private:
+ inline void add_tensor_argument(unsigned int &idx, const ClKernelArgRuntimeDescriptor &arg, ICLTensor *tensor, const Window &arg_slice);
+
+private:
+ ClKernelArgList _arguments{}; /**< All kernel arguments required by the runtime */
+};
+
+/** Argument Binding.
+ * Tensor arguments to ICLKernel's run_op method need to be passed via an ITensorPack, so bind_arguments is essentially a converter from TensorBinding to ITensorPack.
+ */
+Status bind_arguments(ITensorPack &tensor_pack, const ClKernelCode &, const TensorBinding &);
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
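bind_arguments() is declared here but left as a stub in this patch (the definition in ClCompositeKernel.cpp simply returns an empty Status). As a rough idea of the conversion the comment describes, an implementation might re-key each bound ICLTensor into the pack; note that the id mapping below is an assumption made for illustration, not something defined by the patch:

    // Hypothetical TensorBinding -> ITensorPack conversion; reusing the ArgumentID
    // as the pack id is an assumption, not part of this patch.
    Status bind_arguments_sketch(ITensorPack &tensor_pack, const ClKernelCode &, const TensorBinding &binding)
    {
        for(const auto &id_tensor : binding._binding)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(id_tensor.second);
            tensor_pack.add_tensor(id_tensor.first, id_tensor.second);
        }
        return Status{};
    }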
diff --git a/src/runtime/CL/CLScheduler.cpp b/src/runtime/CL/CLScheduler.cpp
index 9d340438b8..22bf850d6e 100644
--- a/src/runtime/CL/CLScheduler.cpp
+++ b/src/runtime/CL/CLScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,6 +27,10 @@
#include "arm_compute/runtime/CL/CLTuner.h"
#include "src/core/CL/ICLKernel.h"
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+#include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h"
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
namespace arm_compute
{
cl::Context &CLScheduler::context()
@@ -185,6 +189,35 @@ void CLScheduler::enqueue_common(ICLKernel &kernel, ITensorPack &tensors, bool f
}
}
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+void CLScheduler::enqueue_common(ICLKernel &kernel, experimental::dynamic_fusion::TensorBinding &tensors, const experimental::dynamic_fusion::ClExecutionDescriptor &exec_desc, bool flush)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(!_is_initialised,
+ "The CLScheduler is not initialised yet! Please call the CLScheduler::get().default_init(), \
+ or CLScheduler::get().init() and CLKernelLibrary::get().init() functions before running functions!");
+
+ const bool inject_memory = !tensors._binding.empty();
+
+ // Run kernel
+ inject_memory ? kernel.run_composite_op(tensors, kernel.window(), _queue, exec_desc) : kernel.run(kernel.window(), _queue);
+
+ if(_job_chaining_enabled)
+ {
+ if(++_job_chaining_count >= _job_chaining_size)
+ {
+ _job_chaining_count = 0;
+ _queue.flush();
+ }
+ }
+ else if(flush)
+ {
+ _queue.flush();
+ }
+}
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
void CLScheduler::enqueue(ICLKernel &kernel, bool flush)
{
ITensorPack pack;
@@ -196,6 +229,15 @@ void CLScheduler::enqueue_op(ICLKernel &kernel, ITensorPack &tensors, bool flush
enqueue_common(kernel, tensors, flush);
}
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+void CLScheduler::enqueue_op(ICLKernel &kernel, experimental::dynamic_fusion::TensorBinding &tensors, const experimental::dynamic_fusion::ClExecutionDescriptor &exec_desc, bool flush)
+{
+ enqueue_common(kernel, tensors, exec_desc, flush);
+}
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
void CLScheduler::enable_job_chaining(int job_chaining_size)
{
_job_chaining_enabled = true;
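The dynamic fusion overload of enqueue_common() reuses the existing job-chaining behaviour: when chaining is enabled the command queue is flushed only once every _job_chaining_size enqueues, otherwise it is flushed whenever the caller requests it. A hedged sketch of what that means for a caller dispatching several composite kernels (the work_items container and its fields are hypothetical):

    // With job chaining enabled, individual enqueues are batched and the queue
    // is flushed once every N kernels; the value 4 below is illustrative only.
    CLScheduler::get().enable_job_chaining(4);
    for(auto &work : work_items) // hypothetical list of prepared kernels/bindings/descriptors
    {
        CLScheduler::get().enqueue_op(work.kernel, work.tensors, work.exec_desc, /* flush */ false);
    }
    CLScheduler::get().sync(); // final synchronisation point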
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index f465577372..80b9ecbd92 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -406,6 +406,17 @@ public:
template <typename T, typename DataType>
void fill_static_values(T &&tensor, const std::vector<DataType> &values) const;
+ // Function type to generate a number to fill tensors.
+ template <typename ResultType>
+ using GeneratorFunctionType = std::function<ResultType(void)>;
+ /** Fill a tensor with a value generator function.
+ *
+ * @param[in, out] tensor To be filled tensor.
+ * @param[in] generate_value A function that generates values.
+ */
+ template <typename T, typename ResultType>
+ void fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const;
+
private:
// Function prototype to convert between image formats.
using Converter = void (*)(const RawTensor &src, RawTensor &dst);
@@ -413,9 +424,6 @@ private:
using Extractor = void (*)(const RawTensor &src, RawTensor &dst);
// Function prototype to load an image file.
using Loader = RawTensor (*)(const std::string &path);
- // Function type to generate a number to fill tensors.
- template <typename ResultType>
- using GeneratorFunctionType = std::function<ResultType(void)>;
const Converter &get_converter(Format src, Format dst) const;
const Converter &get_converter(DataType src, Format dst) const;
@@ -460,14 +468,6 @@ private:
*/
const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;
- /** Fill a tensor with a value generator function.
- *
- * @param[in, out] tensor To be filled tensor.
- * @param[in] generate_value A function that generates values.
- */
- template <typename T, typename ResultType>
- void fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const;
-
mutable TensorCache _cache{};
mutable arm_compute::Mutex _format_lock{};
mutable arm_compute::Mutex _channel_lock{};
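Moving GeneratorFunctionType and fill_with_generator() into the public interface lets tests seed tensors from an arbitrary callable instead of a fixed distribution. A small illustrative use, assuming a previously allocated CLTensor named tensor (the generator itself is a placeholder):

    // Fill a tensor with a simple ramp via a caller-supplied generator.
    float value = 0.f;
    AssetsLibrary::GeneratorFunctionType<float> gen = [&value]() { return value += 0.5f; };
    library->fill_with_generator(CLAccessor(tensor), gen);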
diff --git a/tests/SConscript b/tests/SConscript
index 7b05dcbc5f..bcbd5a2f31 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -115,6 +115,9 @@ filter_pattern = test_env['test_filter']
files_validation += Glob('validation/CPP/' + filter_pattern)
if env['opencl']:
+ if env['experimental_dynamic_fusion'] and not env['embed_kernels']:
+ test_env.Append(CPPDEFINES = ['ENABLE_EXPERIMENTAL_DYNAMIC_FUSION'])
+
filter_pattern = test_env['test_filter']
test_env.Append(CPPDEFINES=['ARM_COMPUTE_CL'])
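Note that the extra test define is only added when OpenCL is enabled, dynamic fusion is requested and kernels are not embedded. A build invocation along those lines might look like the following; experimental_dynamic_fusion is the option introduced by this patch, the remaining flags are the usual build options, and the exact values shown are only an example:

    scons os=linux arch=arm64-v8a opencl=1 embed_kernels=0 experimental_dynamic_fusion=1 validation_tests=1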
diff --git a/tests/validation/CL/UNIT/dynamic_fusion/ClCompositeKernel.cpp b/tests/validation/CL/UNIT/dynamic_fusion/ClCompositeKernel.cpp
new file mode 100644
index 0000000000..c4e7033914
--- /dev/null
+++ b/tests/validation/CL/UNIT/dynamic_fusion/ClCompositeKernel.cpp
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h"
+
+#include "src/core/utils/helpers/float_ops.h"
+#include "src/gpu/cl/kernels/ClElementwiseKernel.h"
+#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/ElementwiseOperations.h"
+#include "tests/validation/reference/GEMM.h"
+
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/AccessWindowStatic.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <chrono>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Macros which measure the wall clock time and record it into a map measurement_map under the name clock_name */
+#define TICK(clock_name) \
+ auto clock_name##_tick = std::chrono::high_resolution_clock::now();
+#define TOCK(clock_name, measurement_map) \
+ auto clock_name##_tock = std::chrono::high_resolution_clock::now(); \
+ measurement_map["\"" #clock_name "\""] = duration_cast<microseconds>(clock_name##_tock - clock_name##_tick);
+#define TOCK_AVG(clock_name, measurement_map, num_iterations) \
+ auto clock_name##_tock = std::chrono::high_resolution_clock::now(); \
+ measurement_map["\"" #clock_name "\""] = duration_cast<microseconds>((clock_name##_tock - clock_name##_tick) / (num_iterations));
+
+template <typename T, typename U>
+void fill(U &&tensor, int seed)
+{
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
+ library->fill(tensor, distribution, seed);
+
+ // Fill border with infinity in order to check the presence of NaN values (i.e. inf * 0)
+ DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) };
+ library->fill_borders_with_garbage(tensor, distribution_inf, seed);
+}
+
+using ElementsProcessed = Steps;
+std::pair<Status, Window> mock_gemm_native_validate_and_configure_window(ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info,
+ const GEMMKernelInfo &gemm_info, ElementsProcessed &num_elements_processed)
+{
+ unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
+ unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
+ bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
+ bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
+
+ Window win{};
+ Window win_out{};
+ bool window_changed = false;
+
+ // In case both input and dst have to be reinterpreted as 3D tensors,
+ // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
+ if(reinterpret_input_as_3d == reinterpret_output_as_3d)
+ {
+ reinterpret_output_as_3d = false;
+ }
+
+ // dst tensor auto initialization if not yet initialized
+ auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)));
+
+ TensorInfo tmp_info(*dst);
+
+ if(reinterpret_output_as_3d)
+ {
+ // Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
+ // the window needs to be constructed on the 2D collapsed version of the tensor
+ TensorShape tmp_shape(dst->tensor_shape());
+ tmp_shape.collapse(2U, 1U);
+ tmp_info.set_tensor_shape(tmp_shape);
+ }
+
+ // Configure kernel window
+ num_elems_processed_per_iteration_x = rhs_info.n0;
+ num_elems_processed_per_iteration_y = lhs_info.m0;
+
+ win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+ win_out = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+
+ AccessWindowStatic src0_access(src0, 0, 0,
+ src0->dimension(0),
+ src0->dimension(1));
+ AccessWindowStatic src1_access(src1, 0, 0,
+ ceil_to_multiple(src1->dimension(0), num_elems_processed_per_iteration_x),
+ src1->dimension(1));
+ AccessWindowStatic dst_access(dst, 0, 0,
+ dst->dimension(0),
+ dst->dimension(1));
+
+ if(src2 != nullptr)
+ {
+ const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
+
+ AccessWindowStatic src2_access(src2, 0, 0,
+ ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
+ src2->dimension(1));
+
+ window_changed = update_window_and_padding(win, src0_access, src1_access, src2_access) || // window used by the execute_window_loop
+ update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
+ }
+ else
+ {
+ window_changed = update_window_and_padding(win, src0_access, src1_access) || // window used by the execute_window_loop
+ update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
+ }
+
+ // Collapse along the Z direction
+ // This collapse needs to be here in order to tune the Z dimension of LWS
+ Window collapsed = win;
+ const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
+ collapsed = win.collapse(win, dimension_to_collapse);
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, collapsed);
+}
+
+void set_build_options(ClKernelCode &cl_code, GemmNativeDescriptor gemm_native_desc,
+ const TensorInfo &t_lhs_info,
+ const TensorInfo &t_rhs_info,
+ const TensorInfo *t_bias_info,
+ const TensorInfo &t_dst_info)
+{
+ CLBuildOptions ref_cl_build_options;
+ {
+ // If reinterpret_input_as_3d = reinterpret_output_as_3d = true,
+ // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
+ // This means that the actual m used by the kernel is given by dst->dimension(1) and not by gemm_info.m
+ auto reinterpret_input_as_3d = gemm_native_desc.reinterpret_input_as_3d;
+ auto reinterpret_output_as_3d = gemm_native_desc.depth_output_gemm3d != 0;
+ auto _slide_matrix_b = (t_rhs_info.num_dimensions() >= t_lhs_info.num_dimensions());
+ auto _use_dummy_work_items = false;
+ // In case both input and dst have to be reinterpreted as 3D tensors,
+ // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
+ if(reinterpret_input_as_3d == reinterpret_output_as_3d)
+ {
+ reinterpret_input_as_3d = false;
+ reinterpret_output_as_3d = false;
+ }
+
+ const unsigned int internal_m = reinterpret_output_as_3d ? gemm_native_desc.m : t_dst_info.dimension(1);
+
+ const unsigned int h_gemm_3d = reinterpret_output_as_3d ? t_dst_info.dimension(1) : t_lhs_info.dimension(1);
+ const unsigned int d_gemm_3d = reinterpret_output_as_3d ? t_dst_info.dimension(2) : t_lhs_info.dimension(2);
+
+ // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
+ const unsigned int partial_store_m0 = internal_m % gemm_native_desc.lhs_info.m0;
+ const unsigned int partial_store_n0 = gemm_native_desc.n % gemm_native_desc.rhs_info.n0;
+
+ // Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
+ const unsigned int internal_m0 = std::min(internal_m, gemm_native_desc.lhs_info.m0);
+
+ ref_cl_build_options.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(t_dst_info.data_type()));
+ ref_cl_build_options.add_option_if(!(helpers::float_ops::is_one(gemm_native_desc.alpha)), "-DALPHA=" + float_to_string_with_full_precision(gemm_native_desc.alpha));
+ ref_cl_build_options.add_option_if(t_bias_info != nullptr, "-DBETA=" + float_to_string_with_full_precision(gemm_native_desc.beta));
+ ref_cl_build_options.add_option_if(helpers::float_ops::is_one(gemm_native_desc.beta), "-DUNIT_BETA");
+ ref_cl_build_options.add_option_if(gemm_native_desc.broadcast_bias, "-DBROADCAST_BIAS");
+ ref_cl_build_options.add_option_if(reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
+ ref_cl_build_options.add_option_if(reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
+ ref_cl_build_options.add_option_if(reinterpret_input_as_3d || reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
+ ref_cl_build_options.add_option_if(reinterpret_input_as_3d || reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
+ ref_cl_build_options.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(t_rhs_info.dimension(2)));
+ ref_cl_build_options.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
+ ref_cl_build_options.add_option("-DM=" + support::cpp11::to_string(internal_m));
+ ref_cl_build_options.add_option("-DN=" + support::cpp11::to_string(gemm_native_desc.n));
+ ref_cl_build_options.add_option("-DK=" + support::cpp11::to_string(gemm_native_desc.k));
+ ref_cl_build_options.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
+ ref_cl_build_options.add_option("-DN0=" + support::cpp11::to_string(gemm_native_desc.rhs_info.n0));
+ ref_cl_build_options.add_option("-DK0=" + support::cpp11::to_string(gemm_native_desc.rhs_info.k0));
+ ref_cl_build_options.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
+ ref_cl_build_options.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
+ // Manually add PostOps
+ {
+ ref_cl_build_options.add_option("-DOP=ADD_X_POS_1");
+ ref_cl_build_options.add_option("-DP2_ELTWISE_ARG1_HEIGHT=" + support::cpp11::to_string(t_dst_info.dimension(1)));
+ ref_cl_build_options.add_option("-DP2_ELTWISE_ARG1_WIDTH=" + support::cpp11::to_string(t_dst_info.dimension(0)));
+ }
+ }
+ cl_code.build_options = ref_cl_build_options;
+}
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(UNIT)
+TEST_SUITE(DYNAMIC_FUSION)
+TEST_SUITE(ClCompositeKernel)
+TEST_SUITE(Validate)
+
+TEST_CASE(MoveNet_SubGraph_1, framework::DatasetMode::ALL)
+{
+ /* Computation:
+ * out = add(addend, gemm_native(lhs, rhs, bias)) (non-broadcast)
+ */
+ const auto data_type = DataType::F32;
+ const auto m = 5U;
+ const auto n = 4U;
+ const auto k = 3U;
+ const auto t_lhs_shape = TensorShape(k, m);
+ const auto t_rhs_shape = TensorShape(n, k);
+ const auto t_dst_shape = TensorShape(n, m);
+ auto t_lhs_info = TensorInfo(t_lhs_shape, 1, data_type);
+ auto t_rhs_info = TensorInfo(t_rhs_shape, 1, data_type);
+ const auto t_bias_info = TensorInfo(TensorShape(), 1, DataType::F32);
+ auto t_dst_info = TensorInfo(t_dst_shape, 1, data_type);
+
+ const ClTensorDescriptor t_lhs_desc{ &t_lhs_info, 2 };
+ const ClTensorDescriptor t_rhs_desc{ &t_rhs_info, 2 };
+ const ClTensorDescriptor t_bias_desc{ &t_bias_info, 2 };
+ const ClTensorDescriptor t_addend_desc{ &t_dst_info, 2 };
+ const ClTensorDescriptor t_dst_desc{ &t_dst_info, 2 };
+
+ ClKernelBlueprint bp;
+ ArgumentID tid_lhs;
+ ArgumentID tid_rhs;
+ ArgumentID tid_l0_bias = g_arg_placeholder;
+ ArgumentID tid_l1_addend;
+ ArgumentID tid_dst;
+ auto st = add_tensor_argument(bp, t_lhs_desc, tid_lhs);
+ st = add_tensor_argument(bp, t_rhs_desc, tid_rhs);
+ st = add_tensor_argument(bp, t_addend_desc, tid_l1_addend);
+ st = add_tensor_argument(bp, t_dst_desc, tid_dst);
+
+ const auto common_kernel_desc = ClKernelComponentDescriptor{};
+ const GemmNativeDescriptor gemm_native_desc{ 1.0, 1.0, m, n, k };
+ const GEMMKernelInfo gemm_info{ m, n, k, 0, false, false, false, false, ActivationLayerInfo{}, 1, 1, gemm_native_desc.lhs_info, gemm_native_desc.rhs_info, 0, 0 };
+ const EltwiseAddDescriptor eltwise_add_desc{ ConvertPolicy::WRAP };
+ const TileDescriptor store_tile_info{};
+
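+    // Route the components through an intermediate accumulator: gemm_native writes tid_acc, eltwise_add adds the addend into it, store writes it to tid_dst.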
+ ArgumentID tid_acc;
+ st = add_tensor_intermed(bp, tid_acc);
+ st = add_kcomp_gemm_native(bp, common_kernel_desc, gemm_native_desc, tid_lhs, tid_rhs, tid_l0_bias, tid_acc);
+
+    st = add_kcomp_eltwise_add(bp, common_kernel_desc, eltwise_add_desc, tid_l1_addend, tid_acc, tid_acc);
+ st = add_kcomp_store(bp, common_kernel_desc, tid_acc, tid_dst, StoreType::StoreBlockBoundaryAware);
+
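+    // Build the fused OpenCL kernel code for the target GPU, then attach the reference build options and execution window.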
+ ClKernelCode cl_code;
+
+ st = set_tile_info(bp, store_tile_info);
+ st = build(cl_code, ClCodeBuilderContext{ GpuInfo{ GPUTarget::G71 } }, bp);
+
+ set_build_options(cl_code, gemm_native_desc, t_lhs_info, t_rhs_info, nullptr, t_dst_info);
+ ElementsProcessed num_elements_processed{};
+ auto win_config = mock_gemm_native_validate_and_configure_window(&t_lhs_info, &t_rhs_info, nullptr, &t_dst_info, gemm_native_desc.lhs_info, gemm_native_desc.rhs_info, gemm_info,
+ num_elements_processed);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ cl_code.window = win_config.second;
+
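+    // Derive the execution descriptor for the generated code through static tuning.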
+ ClExecutionDescriptor exec_desc;
+ st = tune_static(exec_desc, cl_code);
+
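+    // Configure the composite kernel from the generated code using the default CL compile context.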
+ CLScheduler::get().default_init();
+ ClCompositeKernel kernel;
+ kernel.configure(CLKernelLibrary::get().get_compile_context(), cl_code);
+
+ // Construct tensors
+ CLTensor t_lhs{};
+ CLTensor t_rhs{};
+ CLTensor t_l1_addend{};
+ CLTensor t_dst{};
+ // Init tensors
+ {
+ t_lhs.allocator()->init(t_lhs_info);
+ t_rhs.allocator()->init(t_rhs_info);
+ t_l1_addend.allocator()->init(t_dst_info);
+ t_dst.allocator()->init(t_dst_info);
+ }
+ // "Pack" tensors
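+    // The dynamic fusion path binds tensors to the blueprint's argument IDs instead of fixed ACL_SRC/ACL_DST slots.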
+ TensorBinding tensors({ { tid_lhs, &t_lhs },
+ { tid_rhs, &t_rhs },
+ { tid_l1_addend, &t_l1_addend },
+ { tid_dst, &t_dst }
+ });
+ // Allocate and fill tensors
+ {
+ t_lhs.allocator()->allocate();
+ t_rhs.allocator()->allocate();
+ t_l1_addend.allocator()->allocate();
+ t_dst.allocator()->allocate();
+ fill<float>(CLAccessor(t_lhs), 0);
+ fill<float>(CLAccessor(t_rhs), 1);
+ fill<float>(CLAccessor(t_l1_addend), 2);
+ }
+
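+    // Run the fused kernel once on the bound tensors.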
+ CLScheduler::get().enqueue_op(kernel, tensors, exec_desc, true);
+
+ // Create reference
+ SimpleTensor<float> ref_t_lhs{ t_lhs_shape, data_type, 1 };
+ SimpleTensor<float> ref_t_rhs{ t_rhs_shape, data_type, 1 };
+ SimpleTensor<float> ref_t_bias_placeholder{ t_dst_shape, data_type, 1 };
+ SimpleTensor<float> ref_t_l1_addend{ t_dst_shape, data_type, 1 };
+
+ // Fill reference
+ fill<float>(ref_t_lhs, 0);
+ fill<float>(ref_t_rhs, 1);
+ fill<float>(ref_t_l1_addend, 2);
+ const auto ref_t_dst = reference::arithmetic_operation(
+ ArithmeticOperation::ADD,
+ ref_t_l1_addend,
+ reference::gemm(ref_t_lhs, ref_t_rhs, ref_t_bias_placeholder, gemm_native_desc.alpha, 0.f /* To disable bias */),
+ data_type,
+ eltwise_add_desc.convert_policy);
+
+ RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
+ validate(CLAccessor(t_dst), ref_t_dst, tolerance_f32);
+}
+
+TEST_SUITE_END() // Validate
+
+TEST_SUITE(Benchmark)
+TEST_CASE(MoveNet_SubGraph_1, framework::DatasetMode::ALL)
+{
+ using std::chrono::duration_cast;
+ using std::chrono::microseconds;
+ const int num_iterations = 200;
+ std::map<std::string, std::chrono::microseconds> measurements;
+ /* Computation:
+ * out = add(addend, gemm_native(lhs, rhs, bias))
+ */
+ const auto data_type = DataType::F32;
+ const unsigned int m = 12 * 12;
+ const unsigned int n = 64;
+ const unsigned int k = 384;
+ const auto t_lhs_shape = TensorShape(k, m);
+ const auto t_rhs_shape = TensorShape(n, k);
+ const auto t_dst_shape = TensorShape(n, m);
+ auto t_lhs_info = TensorInfo(t_lhs_shape, 1, data_type);
+ auto t_rhs_info = TensorInfo(t_rhs_shape, 1, data_type);
+ auto t_bias_info = TensorInfo(TensorShape(), 1, data_type);
+ auto t_l0_dst_info = TensorInfo(t_dst_shape, 1, data_type); // Intermediate tensor for cond3
+ auto t_l1_rhs_info = TensorInfo(t_dst_shape, 1, data_type);
+ auto t_dst_info = TensorInfo(t_dst_shape, 1, data_type);
+
+ const auto common_kernel_desc = ClKernelComponentDescriptor{};
+ const GemmNativeDescriptor gemm_native_desc{ 1.0, 0.0, m, n, k };
+ const GEMMKernelInfo gemm_info{ m, n, k, 0, false, false, false, false, ActivationLayerInfo{}, 1, 1, gemm_native_desc.lhs_info, gemm_native_desc.rhs_info, 0, 0 };
+ const EltwiseAddDescriptor eltwise_add_desc{ ConvertPolicy::WRAP };
+ const TileDescriptor store_tile_info{};
+
+ // Create reference
+ SimpleTensor<float> ref_t_lhs{ t_lhs_shape, data_type, 1 };
+ SimpleTensor<float> ref_t_rhs{ t_rhs_shape, data_type, 1 };
+ SimpleTensor<float> ref_t_bias_placeholder{ t_dst_shape, data_type, 1 };
+ SimpleTensor<float> ref_t_l1_addend{ t_dst_shape, data_type, 1 };
+
+ // Fill reference
+ fill<float>(ref_t_lhs, 0);
+ fill<float>(ref_t_rhs, 1);
+ fill<float>(ref_t_l1_addend, 2);
+ const auto ref_t_dst = reference::arithmetic_operation(
+ ArithmeticOperation::ADD,
+ ref_t_l1_addend,
+ reference::gemm(ref_t_lhs, ref_t_rhs, ref_t_bias_placeholder, gemm_native_desc.alpha, 0.f /* To disable bias */),
+ data_type,
+ eltwise_add_desc.convert_policy);
+
+ CLScheduler::get().default_init();
+
+ /* Condition 0: Dynamic Fused Kernel */
+ CLTensor cond0_t_dst{};
+ {
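+        // Build, tune, configure and run the dynamically fused kernel; the TICK/TOCK macros record the elapsed time of each stage in the measurements map.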
+ TICK(cond0_0_startup_time);
+
+ ClKernelBlueprint bp;
+ ArgumentID tid_lhs;
+ ArgumentID tid_rhs;
+ ArgumentID tid_l0_bias = g_arg_placeholder;
+ ArgumentID tid_l1_addend;
+ ArgumentID tid_dst;
+
+ const ClTensorDescriptor t_lhs_desc{ &t_lhs_info, 2 };
+ const ClTensorDescriptor t_rhs_desc{ &t_rhs_info, 2 };
+ const ClTensorDescriptor t_bias_desc{ &t_bias_info, 2 };
+ const ClTensorDescriptor t_addend_desc{ &t_dst_info, 2 };
+ const ClTensorDescriptor t_dst_desc{ &t_dst_info, 2 };
+
+ ClKernelCode cl_code;
+ TICK(cond0_build_time)
+ auto st = add_tensor_argument(bp, t_lhs_desc, tid_lhs);
+ st = add_tensor_argument(bp, t_rhs_desc, tid_rhs);
+ st = add_tensor_argument(bp, t_addend_desc, tid_l1_addend);
+ st = add_tensor_argument(bp, t_dst_desc, tid_dst);
+
+ ArgumentID tid_acc;
+ st = add_tensor_intermed(bp, tid_acc);
+ st = add_kcomp_gemm_native(bp, common_kernel_desc, gemm_native_desc, tid_lhs, tid_rhs, tid_l0_bias, tid_acc);
+
+        st = add_kcomp_eltwise_add(bp, common_kernel_desc, eltwise_add_desc, tid_l1_addend, tid_acc, tid_acc);
+
+ st = add_kcomp_store(bp, common_kernel_desc, tid_acc, tid_dst, StoreType::StoreBlockBoundaryAware);
+
+ st = set_tile_info(bp, store_tile_info);
+ st = build(cl_code, ClCodeBuilderContext{ GpuInfo{ GPUTarget::G71 } }, bp);
+ set_build_options(cl_code, gemm_native_desc, t_lhs_info, t_rhs_info, nullptr, t_dst_info);
+ ElementsProcessed num_elements_processed{};
+ auto win_config = mock_gemm_native_validate_and_configure_window(&t_lhs_info, &t_rhs_info, nullptr, &t_dst_info, gemm_native_desc.lhs_info, gemm_native_desc.rhs_info, gemm_info,
+ num_elements_processed);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ cl_code.window = win_config.second;
+ TOCK(cond0_build_time, measurements)
+
+ TICK(cond0_tune_time)
+ ClExecutionDescriptor exec_desc;
+ st = tune_static(exec_desc, cl_code);
+ TOCK(cond0_tune_time, measurements)
+
+ TICK(cond0_configure_time)
+ ClCompositeKernel kernel;
+ kernel.configure(CLKernelLibrary::get().get_compile_context(), cl_code);
+ TOCK(cond0_configure_time, measurements)
+
+ // Construct tensors
+ CLTensor t_lhs{};
+ CLTensor t_rhs{};
+ CLTensor t_l1_addend{};
+
+ // Init tensors
+ {
+ t_lhs.allocator()->init(t_lhs_info);
+ t_rhs.allocator()->init(t_rhs_info);
+ t_l1_addend.allocator()->init(t_dst_info);
+ cond0_t_dst.allocator()->init(t_dst_info);
+ }
+ // Allocate tensors
+ {
+ t_lhs.allocator()->allocate();
+ t_rhs.allocator()->allocate();
+ t_l1_addend.allocator()->allocate();
+ cond0_t_dst.allocator()->allocate();
+ fill<float>(CLAccessor(t_lhs), 0);
+ fill<float>(CLAccessor(t_rhs), 1);
+ fill<float>(CLAccessor(t_l1_addend), 2);
+ }
+
+ // "Pack" tensors
+ TensorBinding tensors({ { tid_lhs, &t_lhs }, { tid_rhs, &t_rhs }, { tid_l1_addend, &t_l1_addend }, { tid_dst, &cond0_t_dst } });
+
+ CLScheduler::get().enqueue_op(kernel, tensors, exec_desc, true);
+ CLScheduler::get().sync();
+ TOCK(cond0_0_startup_time, measurements)
+
+ TICK(cond0_1_latency)
+ for(int i = 0; i < num_iterations; ++i)
+ {
+ CLScheduler::get().enqueue_op(kernel, tensors, exec_desc, true);
+ }
+ CLScheduler::get().sync();
+ TOCK_AVG(cond0_1_latency, measurements, num_iterations)
+ }
+    /* Condition 1: Dynamic Unfused Kernel (not benchmarked here) */
+ /* Condition 2: Static Fused Kernel (current) */
+ CLTensor cond2_t_dst{};
+ {
+ TICK(cond2_0_startup_time);
+ arm_compute::opencl::kernels::ClGemmMatrixMultiplyNativeKernel l0_gemm_mm;
+
+ TICK(cond2_configure_time);
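+        // The static fused path attaches the element-wise addition to the GEMM kernel through the experimental post-op list.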
+ experimental::PostOpList<ITensorInfo *> post_ops;
+ post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(&t_dst_info, 1, eltwise_add_desc.convert_policy);
+ GEMMKernelInfo gemm_info{ m, n, k, 0, false, false, false, false, ActivationLayerInfo{}, 1, 1, gemm_native_desc.lhs_info, gemm_native_desc.rhs_info, 0, 0, post_ops };
+ l0_gemm_mm.configure(CLKernelLibrary::get().get_compile_context(), &t_lhs_info, &t_rhs_info, nullptr, &t_dst_info, gemm_native_desc.alpha, gemm_native_desc.beta, gemm_native_desc.lhs_info,
+ gemm_native_desc.rhs_info, gemm_info);
+ TOCK(cond2_configure_time, measurements);
+
+ // Construct tensors
+ CLTensor t_lhs{};
+ CLTensor t_rhs{};
+ CLTensor t_l1_addend{};
+
+ // Init tensors
+ {
+ t_lhs.allocator()->init(t_lhs_info);
+ t_rhs.allocator()->init(t_rhs_info);
+ t_l1_addend.allocator()->init(t_dst_info);
+ cond2_t_dst.allocator()->init(t_dst_info);
+ }
+ // Allocate tensors
+ {
+ t_lhs.allocator()->allocate();
+ t_rhs.allocator()->allocate();
+ t_l1_addend.allocator()->allocate();
+ cond2_t_dst.allocator()->allocate();
+ fill<float>(CLAccessor(t_lhs), 0);
+ fill<float>(CLAccessor(t_rhs), 1);
+ fill<float>(CLAccessor(t_l1_addend), 2);
+ }
+
+ // "Pack" tensors
+ ITensorPack tensors
+ {
+ { ACL_SRC_0, &t_lhs },
+ { ACL_SRC_1, &t_rhs },
+ { EXPERIMENTAL_ACL_POST_OP_ARG_FIRST, &t_l1_addend },
+ { ACL_DST, &cond2_t_dst },
+ };
+ CLScheduler::get().enqueue_op(l0_gemm_mm, tensors, true);
+ CLScheduler::get().sync();
+ TOCK(cond2_0_startup_time, measurements);
+
+ TICK(cond2_1_latency);
+ for(int i = 0; i < num_iterations; ++i)
+ {
+ CLScheduler::get().enqueue_op(l0_gemm_mm, tensors, true);
+ }
+ CLScheduler::get().sync();
+ TOCK_AVG(cond2_1_latency, measurements, num_iterations);
+ }
+ /* Condition 3: Static Unfused Kernel (current) */
+ CLTensor cond3_t_dst{};
+ {
+ TICK(cond3_0_startup_time);
+ arm_compute::opencl::kernels::ClGemmMatrixMultiplyNativeKernel l0_gemm_mm;
+ arm_compute::opencl::kernels::ClSaturatedArithmeticKernel l1_add;
+
+ TICK(cond3_configure_time);
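+        // The static unfused path runs two kernels: native GEMM into an intermediate tensor, followed by a saturated arithmetic addition.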
+ GEMMKernelInfo gemm_info{ m, n, k, 0, false, false, false, false, ActivationLayerInfo{}, 1, 1, gemm_native_desc.lhs_info, gemm_native_desc.rhs_info, 0, 0 };
+ l0_gemm_mm.configure(CLKernelLibrary::get().get_compile_context(), &t_lhs_info, &t_rhs_info, nullptr, &t_l0_dst_info, gemm_native_desc.alpha, gemm_native_desc.beta, gemm_native_desc.lhs_info,
+ gemm_native_desc.rhs_info, gemm_info);
+ l1_add.configure(CLKernelLibrary::get().get_compile_context(), ArithmeticOperation::ADD, &t_l0_dst_info, &t_l1_rhs_info, &t_dst_info, eltwise_add_desc.convert_policy);
+ TOCK(cond3_configure_time, measurements);
+
+ // Construct tensors
+ CLTensor t_lhs{};
+ CLTensor t_rhs{};
+ CLTensor t_l0_dst{};
+ CLTensor t_l1_addend{};
+
+ // Init tensors
+ {
+ t_lhs.allocator()->init(t_lhs_info);
+ t_rhs.allocator()->init(t_rhs_info);
+ t_l0_dst.allocator()->init(t_l0_dst_info);
+ t_l1_addend.allocator()->init(t_dst_info);
+ cond3_t_dst.allocator()->init(t_dst_info);
+ }
+ // Allocate tensors
+ {
+ t_lhs.allocator()->allocate();
+ t_rhs.allocator()->allocate();
+ t_l0_dst.allocator()->allocate();
+ t_l1_addend.allocator()->allocate();
+ cond3_t_dst.allocator()->allocate();
+ fill<float>(CLAccessor(t_lhs), 0);
+ fill<float>(CLAccessor(t_rhs), 1);
+ fill<float>(CLAccessor(t_l1_addend), 2);
+ }
+
+ // "Pack" tensors
+ ITensorPack tensors_l0
+ {
+ { ACL_SRC_0, &t_lhs },
+ { ACL_SRC_1, &t_rhs },
+ { ACL_DST, &t_l0_dst },
+ };
+ ITensorPack tensors_l1
+ {
+ { ACL_SRC_0, &t_l0_dst },
+ { ACL_SRC_1, &t_l1_addend },
+ { ACL_DST, &cond3_t_dst },
+ };
+ CLScheduler::get().enqueue_op(l0_gemm_mm, tensors_l0, true);
+ CLScheduler::get().enqueue_op(l1_add, tensors_l1, true);
+ CLScheduler::get().sync();
+ TOCK(cond3_0_startup_time, measurements);
+
+ TICK(cond3_1_latency);
+ for(int i = 0; i < num_iterations; ++i)
+ {
+ CLScheduler::get().enqueue_op(l0_gemm_mm, tensors_l0, true);
+ CLScheduler::get().enqueue_op(l1_add, tensors_l1, true);
+ }
+ CLScheduler::get().sync();
+ TOCK_AVG(cond3_1_latency, measurements, num_iterations);
+ }
+
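+    // Validate each condition's output against the same reference before reporting the timings.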
+ RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
+ std::cout << "cond0 validation: " << std::endl;
+ validate(CLAccessor(cond0_t_dst), ref_t_dst, tolerance_f32);
+ std::cout << "cond2 validation: " << std::endl;
+ validate(CLAccessor(cond2_t_dst), ref_t_dst, tolerance_f32);
+ std::cout << "cond3 validation: " << std::endl;
+ validate(CLAccessor(cond3_t_dst), ref_t_dst, tolerance_f32);
+
+ /* Report */
+ std::cout << "Performance comparison (gemm native + add)" << std::endl;
+ std::cout << "cond0: dynamic fusion module" << std::endl;
+ std::cout << "cond2: static fused with post ops" << std::endl;
+ std::cout << "cond3: static unfused" << std::endl;
+    for(const auto &measurement : measurements)
+    {
+        std::cout << measurement.first << ": " << measurement.second.count() << "us" << std::endl;
+    }
+}
+TEST_SUITE_END() // Benchmark
+TEST_SUITE_END() // ClCompositeKernel
+TEST_SUITE_END() // DYNAMIC_FUSION
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file