Diffstat (limited to 'src/dynamic_fusion/sketch/gpu/operators')
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp                                 72
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp                               172
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuClamp.cpp                              172
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp                             270
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp                    278
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuMatMul.cpp                             245
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuMul.cpp                                 70
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp                             134
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp                             208
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuReshape.cpp                            163
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp                             195
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuSigmoid.cpp                            164
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp                            200
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuSub.cpp                                 71
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuTanh.cpp                               163
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp   200
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h     117
17 files changed, 2894 insertions, 0 deletions
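
All of the new operators follow the same sketch-based workflow: build a GpuWorkloadContext for the target backend, record operators into a GpuWorkloadSketch, and close the fused chain with GpuOutput. A minimal usage sketch of that flow, assuming an OpenCL compile context is already available and that tensor infos are registered through the context's create_tensor_info() helper as in the dynamic fusion test fixtures (variable names are illustrative):

#include "arm_compute/core/CL/CLCompileContext.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

void build_add_workload(CLCompileContext &cl_compile_ctx)
{
    GpuWorkloadContext context{&cl_compile_ctx}; // OpenCL-backed workload context
    GpuWorkloadSketch  sketch{&context};

    // Tensor infos must be registered with the context so they carry valid ids;
    // create_tensor_info() is assumed here, mirroring the dynamic fusion test fixtures.
    ITensorInfo *lhs = context.create_tensor_info(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
    ITensorInfo *rhs = context.create_tensor_info(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
    ITensorInfo *dst = context.create_tensor_info();

    // Validate against the sketch, then record the operator and the terminal store.
    ARM_COMPUTE_ERROR_THROW_ON(GpuAdd::validate_op(sketch, lhs, rhs));
    ITensorInfo *add_out = GpuAdd::create_op(sketch, lhs, rhs);
    GpuOutput::create_op(sketch, add_out, dst);
}

Each create_op() returns a virtual tensor info that can be fed straight into the next operator in the chain, which is how fusion is expressed at this level of the API.
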
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp
new file mode 100644
index 0000000000..201c9f243c
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h"
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+
+#include "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+Status GpuAdd::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *lhs, const ITensorInfo *rhs)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32, DataType::U8,
+ DataType::S16, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");
+
+ // Set the elementwise operation to Add then call the elementwise common validate_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Add);
+ return GpuElementwiseBinaryCommon::validate_op(sketch, lhs, rhs, common_attributes);
+}
+
+Status GpuAdd::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *lhs, const ITensorInfo *rhs)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");
+
+ // Set the elementwise operation to Add then call the elementwise common is_supported_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Add);
+ return GpuElementwiseBinaryCommon::is_supported_op(context, lhs, rhs, common_attributes);
+}
+
+ITensorInfo *GpuAdd::create_op(GpuWorkloadSketch &sketch, ITensorInfo *lhs, ITensorInfo *rhs)
+{
+ // No need to log or validate as they'll be handled inside GpuElementwiseBinaryCommon::create_op()
+ // Set the elementwise operation to Add then call the elementwise common create_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Add);
+ return GpuElementwiseBinaryCommon::create_op(sketch, lhs, rhs, common_attributes);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
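
GpuAdd itself only gates the operand data types; everything else is delegated to GpuElementwiseBinaryCommon with the operation set to Add. The practical difference between the two checks above, continuing the variables from the earlier sketch, is roughly:

// is_supported_op() needs only a workload context: it asks whether the
// configuration could run on this backend at all, without touching sketch state.
const Status supported = GpuAdd::is_supported_op(context, lhs, rhs);

// validate_op() additionally runs the fusion test against the sketch's current
// operator group, so it can reject a supported configuration that cannot be
// fused into the workload being recorded.
const Status fusible = GpuAdd::validate_op(sketch, lhs, rhs);
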
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp
new file mode 100644
index 0000000000..d25a2a3153
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const CastAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_data_type(attributes.data_type()));
+
+ // Check support level
+ // Data Type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst_info_to_validate_ptr, 1, DataType::F16, DataType::F32);
+
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+ // Validate Cast Component
+ {
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+ auto settings = ClComponentCast::Settings();
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentCast::validate(properties, arguments, attributes, settings));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{};
+}
+constexpr GpuOperatorType operator_type = GpuOperatorType::Simple;
+} // namespace
+
+Status
+GpuCast::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *src, const CastAttributes &attributes)
+{
+ return is_supported_op_helper(context, src, nullptr, attributes);
+}
+
+Status GpuCast::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *src, const CastAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_data_type(attributes.data_type()));
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+ITensorInfo *GpuCast::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src, const CastAttributes &attributes)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_LOG_PARAMS(src, attributes);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuCast::validate_op(sketch, src, attributes));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ // Auto initialize dst tensor info if empty
+ auto_init_if_empty(*dst, src->clone()->set_data_type(attributes.data_type()));
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+ const auto *sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON(sketch_ctx->cl_compile_context() == nullptr);
+
+ // Add Cast Component
+ {
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+ auto settings = ClComponentCast::Settings();
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentCast>(properties, arguments, attributes, settings);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
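
A short usage sketch for the cast operator, continuing the earlier setup; the data_type() setter on CastAttributes is assumed to mirror the getter used in this file:

#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"

// Cast the F32 result of the previous operator to F16 within the fused workload.
CastAttributes cast_attr{};
cast_attr.data_type(DataType::F16); // assumed fluent setter mirroring attributes.data_type()

ARM_COMPUTE_ERROR_THROW_ON(GpuCast::validate_op(sketch, add_out, cast_attr));
ITensorInfo *cast_out = GpuCast::create_op(sketch, add_out, cast_attr);
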
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuClamp.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuClamp.cpp
new file mode 100644
index 0000000000..4d6e7f81bb
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuClamp.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuClamp.h"
+
+#include "arm_compute/core/experimental/Types.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentActivation.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const ClampAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(attributes.max_val() < attributes.min_val(),
+ "Maximum clamp value cannot be lower than minimum value");
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+
+ // CLAMP operator is implemented as LU_BOUNDED_RELU with the alpha and beta variables swapped
+ const ClComponentActivation::Attributes act_info{ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
+ attributes.max_val(), attributes.min_val()};
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ // Validate Activation Component
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC, src);
+ arguments.add_const_tensor(ACL_DST, dst_info_to_validate_ptr);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentActivation::validate(properties, arguments, act_info));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+ return Status{};
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Simple;
+} // namespace
+
+Status
+GpuClamp::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *src, const ClampAttributes &attributes)
+{
+ return is_supported_op_helper(context, src, nullptr, attributes);
+}
+
+Status GpuClamp::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *src, const ClampAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+
+ // Check if tensors have valid id, i.e. they are created from a sketch
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+
+ // Perform fusion test to check if the operator meets fusion constraints
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC, src);
+ tensors.add_const_tensor(ACL_DST, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+ITensorInfo *GpuClamp::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src, const ClampAttributes &attributes)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_LOG_PARAMS(src, attributes);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuClamp::validate_op(sketch, src, attributes));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ // Auto initialize dst tensor
+ auto_init_if_empty(*dst, *src->clone());
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+
+ // CLAMP operator is implemented as LU_BOUNDED_RELU with the alpha and beta variables swapped
+ const ClComponentActivation::Attributes act_info{ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
+ attributes.max_val(), attributes.min_val()};
+
+ const auto *const sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ // Add Activation Component
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC, src);
+ arguments.add_const_tensor(ACL_DST, dst);
+ comp_graph.add_new_component<ClComponentActivation>(properties, arguments, act_info);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC, src);
+ tensors.add_const_tensor(ACL_DST, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
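
GpuClamp follows the same single-input pattern; the attributes carry only the clamp bounds, and the component is emitted as an LU_BOUNDED_RELU activation as noted in the comments above. A brief sketch, with the min_val()/max_val() setters assumed to mirror the getters:

#include "arm_compute/dynamic_fusion/sketch/attributes/ClampAttributes.h"

// Clamp to [0, 6], i.e. a ReLU6-style activation.
ClampAttributes clamp_attr{};
clamp_attr.min_val(0.0f).max_val(6.0f); // assumed chained setters; max_val must not be below min_val

ITensorInfo *clamp_out = GpuClamp::create_op(sketch, add_out, clamp_attr);
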
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
new file mode 100644
index 0000000000..aaeec543f8
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDirectConv2d.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"
+#include "src/runtime/heuristics/direct_conv/ClDirectConvKernelConfig.h"
+#include "src/runtime/heuristics/direct_conv/IClDirectConvKernelConfig.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+DirectConvComputeKernelInfo
+config_direct_convolution_nhwc(const ITensorInfo *src, const ITensorInfo *weights, const PadStrideInfo &conv_info)
+{
+ // Get GPU target
+ GPUTarget gpu_target = CLScheduler::get().target();
+
+ std::unique_ptr<arm_compute::cl_direct_conv::IClDirectConvKernelConfig> t =
+ arm_compute::cl_direct_conv::ClDirectConvKernelConfigurationFactory::create(gpu_target);
+
+ return t->configure(src, weights, conv_info);
+}
+
+void calculate_and_init_dst_if_empty(ITensorInfo *dst,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const Conv2dAttributes &attributes)
+{
+ if (dst->total_size() == 0U)
+ {
+ const auto shape = misc::shape_calculator::compute_deep_convolution_shape(
+ src->tensor_shape(), src->data_layout(), wei->tensor_shape(),
+ PadStrideInfo(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
+ attributes.pad().right, attributes.pad().top, attributes.pad().bottom,
+ DimensionRoundingType::FLOOR)); // use the default DimensionRoundingType
+
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(shape));
+ }
+}
+
+/* A helper method to reduce the duplication in dst tensor initialization
+* when calling validate()
+*/
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const ITensorInfo *dst,
+ const Conv2dAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
+
+ // Check support level
+ // Data type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ // Data layout
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
+
+ // Check components
+ const auto gpu_target = context.gpu_target();
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+ // Validate Direct Conv2d Component
+ {
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+ auto settings = ClComponentDirectConv2d::Settings();
+
+ settings.fast_relaxed_math(
+ (gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST) &&
+ (dst_info_to_validate_ptr->data_type() == DataType::F32 ||
+ dst_info_to_validate_ptr->data_type() == DataType::F16));
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_SRC_1, wei);
+ arguments.add_const_tensor(ACL_SRC_2, bia);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentDirectConv2d::validate(properties, arguments, attributes, settings));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+ return Status{};
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const Conv2dAttributes &attributes)
+{
+ return is_supported_op_helper(context, src, wei, bia, nullptr, attributes);
+}
+
+Status GpuConv2d::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const Conv2dAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!wei->are_values_constant(), "Dynamic weights are not supported");
+
+ // Check if tensors have valid id. I.e. they are created from a sketch
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !wei->has_valid_id());
+ if (bia != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(!bia->has_valid_id());
+ }
+
+ // This tensor info will have an invalid id, but because all the existing tensors in the
+ // sketch have valid ids and the DependencyGraph implementation has no notion of id validity,
+ // it will be treated as just another tensor id and will pass validation
+ // Additionally, a new dst id is added every time in create_op, so there's no need to validate it
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
+
+ // Perform fusion test
+ // Check if operator meets fusion constraints
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_SRC_1, wei);
+ tensors.add_const_tensor(ACL_SRC_2, bia);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, wei, bia, &dst_info_to_validate, attributes);
+}
+
+ITensorInfo *GpuConv2d::create_op(
+ GpuWorkloadSketch &sketch, ITensorInfo *src, ITensorInfo *wei, ITensorInfo *bia, const Conv2dAttributes &attributes)
+{
+ ARM_COMPUTE_LOG_PARAMS(src, wei, bia, attributes);
+ PadStrideInfo conv_info(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
+ attributes.pad().right, attributes.pad().top, attributes.pad().bottom,
+ DimensionRoundingType::FLOOR);
+ // Initialize the direct convolution descriptor
+ const DirectConvComputeKernelInfo desc = config_direct_convolution_nhwc(src, wei, conv_info);
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+
+ // Assert validation
+ ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, src, wei, bia, attributes));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, wei, dst);
+
+ // Auto initialize dst tensor
+ calculate_and_init_dst_if_empty(dst, src, wei, attributes);
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+
+ const auto sketch_ctx = sketch.implementation().context();
+
+ const auto gpu_target = sketch_ctx->gpu_target();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = sketch_ctx->cl_compile_context();
+ ARM_COMPUTE_ERROR_ON(cl_compile_ctx == nullptr);
+ ARM_COMPUTE_UNUSED(cl_compile_ctx);
+
+ // Add Direct Conv2d Component
+ {
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ auto settings = ClComponentDirectConv2d::Settings();
+
+ settings.fast_relaxed_math(
+ (gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST) &&
+ (dst->data_type() == DataType::F32 || dst->data_type() == DataType::F16));
+
+ settings.direct_conv_descriptor(desc);
+
+ if (settings.export_to_cl_image())
+ {
+ arm_compute::opencl::kernels::gemm::update_padding_for_cl_image(wei);
+ }
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_SRC_1, wei);
+ arguments.add_const_tensor(ACL_SRC_2, bia);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentDirectConv2d>(properties, arguments, attributes, settings);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_SRC_1, wei);
+ tensors.add_const_tensor(ACL_SRC_2, bia);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
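
Conv2dAttributes only carries stride, padding and dilation; the rounding type is fixed to FLOOR by the operator, and the direct-convolution tile configuration is chosen by the ClDirectConv heuristics rather than by the caller. A hedged usage sketch, assuming fluent setters matching the getters used in this file and src/wei/bia registered through the context (src in NHWC, wei constant, bia optional):

#include "arm_compute/dynamic_fusion/sketch/attributes/Conv2dAttributes.h"

Conv2dAttributes conv_attr{};
conv_attr.stride(Size2D(1, 1))   // assumed setters mirroring the stride()/pad() getters above
    .pad(Padding2D(1, 1, 1, 1)); // left, right, top, bottom

ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, src, wei, bia, conv_attr));
ITensorInfo *conv_out = GpuConv2d::create_op(sketch, src, wei, bia, conv_attr);
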
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp
new file mode 100644
index 0000000000..e2b673bd43
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h"
+
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDepthwiseConv2d.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"
+#include "src/runtime/heuristics/dwc_native/ClDWCNativeKernelConfig.h"
+#include "src/runtime/heuristics/dwc_native/IClDWCNativeKernelConfig.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+void calculate_and_init_dst_if_empty(ITensorInfo *dst,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const DepthwiseConv2dAttributes &attributes)
+{
+ if (dst->total_size() == 0U)
+ {
+ const PadStrideInfo pad_stride_info(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
+ attributes.pad().right, attributes.pad().top, attributes.pad().bottom,
+ attributes.dimension_rounding_type());
+
+ const ConvolutionInfo conv_info{pad_stride_info, attributes.depth_multiplier(), ActivationLayerInfo(),
+ attributes.dilation()};
+ const TensorShape shape = misc::shape_calculator::compute_depthwise_convolution_shape(*src, *wei, conv_info);
+
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(shape));
+ }
+}
+
+/* A helper method to reduce the duplication in dst tensor initialization
+* when calling validate()
+*/
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const ITensorInfo *dst,
+ const DepthwiseConv2dAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
+
+ // Check support level
+ // Data type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ // Data layout
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
+
+ const GpuTarget gpu_target = context.gpu_target();
+
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const CLCompileContext *cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+
+ // Validate Depthwise Conv2d Component
+ {
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+ auto settings = ClComponentDepthwiseConv2d::Settings();
+
+ const PadStrideInfo legacy_conv_info(attributes.stride().x(), attributes.stride().y(),
+ attributes.pad().left, attributes.pad().right, attributes.pad().top,
+ attributes.pad().bottom, DimensionRoundingType::FLOOR);
+
+ // Get the depthwise convolution compute parameters
+ auto t = arm_compute::cl_dwc::ClDWCNativeKernelConfigurationFactory::create(gpu_target);
+ const DWCComputeKernelInfo dwc_info =
+ t->configure(src, wei, legacy_conv_info, attributes.dilation(), attributes.depth_multiplier());
+
+ settings.fast_relaxed_math(
+ (gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST) &&
+ (dst_info_to_validate_ptr->data_type() == DataType::F32 ||
+ dst_info_to_validate_ptr->data_type() == DataType::F16));
+
+ settings.is_fma_available(get_arch_from_target(gpu_target) != GPUTarget::MIDGARD)
+ .m0(dwc_info.m0)
+ .n0(dwc_info.n0)
+ .export_input_to_cl_image(dwc_info.export_input_to_cl_image)
+ .export_weights_to_cl_image(dwc_info.export_weights_to_cl_image);
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_SRC_1, wei);
+ arguments.add_const_tensor(ACL_SRC_2, bia);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ ClComponentDepthwiseConv2d::validate(properties, arguments, attributes, settings));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{};
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+Status GpuDepthwiseConv2d::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const DepthwiseConv2dAttributes &attributes)
+{
+ return is_supported_op_helper(context, src, wei, bia, nullptr, attributes);
+}
+
+Status GpuDepthwiseConv2d::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const DepthwiseConv2dAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!wei->are_values_constant(), "Dynamic weights are not supported");
+
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !wei->has_valid_id());
+
+ if (bia != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(!bia->has_valid_id());
+ }
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_SRC_1, wei);
+ tensors.add_const_tensor(ACL_SRC_2, bia);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, wei, bia, &dst_info_to_validate, attributes);
+}
+
+ITensorInfo *GpuDepthwiseConv2d::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *wei,
+ ITensorInfo *bia,
+ const DepthwiseConv2dAttributes &attributes)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, wei);
+ ARM_COMPUTE_LOG_PARAMS(src, wei, bia, attributes);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuDepthwiseConv2d::validate_op(sketch, src, wei, bia, attributes));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ calculate_and_init_dst_if_empty(dst, src, wei, attributes);
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+ const auto *sketch_ctx = sketch.implementation().context();
+ const GpuTarget gpu_target = sketch_ctx->gpu_target();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(sketch_ctx->cl_compile_context());
+
+ // Add Depthwise Conv2d Component
+ {
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+ auto settings = ClComponentDepthwiseConv2d::Settings();
+
+ const PadStrideInfo legacy_conv_info(attributes.stride().x(), attributes.stride().y(),
+ attributes.pad().left, attributes.pad().right, attributes.pad().top,
+ attributes.pad().bottom, DimensionRoundingType::FLOOR);
+
+ // Get the depthwise convolution compute parameters
+ auto t = arm_compute::cl_dwc::ClDWCNativeKernelConfigurationFactory::create(gpu_target);
+ const DWCComputeKernelInfo dwc_info =
+ t->configure(src, wei, legacy_conv_info, attributes.dilation(), attributes.depth_multiplier());
+
+ settings.is_fma_available(get_arch_from_target(gpu_target) != GPUTarget::MIDGARD)
+ .m0(dwc_info.m0)
+ .n0(dwc_info.n0)
+ .export_input_to_cl_image(dwc_info.export_input_to_cl_image)
+ .export_weights_to_cl_image(dwc_info.export_weights_to_cl_image);
+
+ if (settings.export_input_to_cl_image())
+ {
+ arm_compute::opencl::kernels::gemm::update_padding_for_cl_image(src);
+ }
+
+ if (settings.export_weights_to_cl_image())
+ {
+ arm_compute::opencl::kernels::gemm::update_padding_for_cl_image(wei);
+ }
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_SRC_1, wei);
+ arguments.add_const_tensor(ACL_SRC_2, bia);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentDepthwiseConv2d>(properties, arguments, attributes, settings);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_SRC_1, wei);
+ tensors.add_const_tensor(ACL_SRC_2, bia);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
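
The depthwise variant adds depth_multiplier, dilation and a configurable rounding type on top of stride and padding; tile sizes and cl_image exports come from the ClDWCNative heuristics. A short sketch under the same assumptions as the Conv2d example:

#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h"

DepthwiseConv2dAttributes dwc_attr{};
dwc_attr.stride(Size2D(2, 2))
    .pad(Padding2D(0, 1, 0, 1))
    .depth_multiplier(1)
    .dilation(Size2D(1, 1)); // assumed fluent setters mirroring the getters used above

ITensorInfo *dwc_out = GpuDepthwiseConv2d::create_op(sketch, src, wei, bia, dwc_attr);
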
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuMatMul.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuMatMul.cpp
new file mode 100644
index 0000000000..2997b28ec1
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuMatMul.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMatMul.h"
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentMatMul.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+void calculate_and_init_dst_if_empty(ITensorInfo *dst,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const MatMulAttributes &attributes,
+ const GpuMatMulSettings &settings)
+{
+ ARM_COMPUTE_UNUSED(attributes);
+
+ if (dst->total_size() == 0U)
+ {
+ const auto dst_shape = misc::shape_calculator::compute_matmul_shape(
+ lhs->tensor_shape(), rhs->tensor_shape(),
+ MatMulKernelInfo(attributes.adj_lhs(), attributes.adj_rhs(), settings.m0(), settings.n0(), settings.k0()));
+
+ auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(dst_shape));
+ }
+}
+
+/* A helper method to reduce the duplication in dst tensor initialization
+* when calling validate()
+*/
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const ITensorInfo *dst,
+ const MatMulAttributes &attributes,
+ const GpuMatMulSettings &settings)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, lhs, rhs, attributes, settings);
+
+ // Check support level
+ // Data type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32);
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+ // Validate MatMul Component
+ {
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, lhs);
+ arguments.add_const_tensor(ACL_SRC_1, rhs);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentMatMul::validate(properties, arguments, attributes, settings));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+ return Status{};
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+int GpuMatMulSettings::n0() const
+{
+ return _n0;
+}
+
+GpuMatMulSettings &GpuMatMulSettings::n0(int n0)
+{
+ _n0 = n0;
+ return *this;
+}
+
+int GpuMatMulSettings::m0() const
+{
+ return _m0;
+}
+
+GpuMatMulSettings &GpuMatMulSettings::m0(int m0)
+{
+ _m0 = m0;
+ return *this;
+}
+
+int GpuMatMulSettings::k0() const
+{
+ return _k0;
+}
+
+GpuMatMulSettings &GpuMatMulSettings::k0(int k0)
+{
+ _k0 = k0;
+ return *this;
+}
+
+Status GpuMatMul::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const MatMulAttributes &attributes,
+ const GpuMatMulSettings &settings)
+{
+ return is_supported_op_helper(context, lhs, rhs, nullptr, attributes, settings);
+}
+
+Status GpuMatMul::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const MatMulAttributes &attributes,
+ const GpuMatMulSettings &settings)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+
+ // Check if tensors have valid id. I.e. they are created from a sketch
+ ARM_COMPUTE_RETURN_ERROR_ON(!lhs->has_valid_id() || !rhs->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, lhs, rhs, attributes, settings);
+
+ // Perform fusion test
+ // Check if operator meets fusion constraints
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, lhs);
+ tensors.add_const_tensor(ACL_SRC_1, rhs);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), lhs, rhs, &dst_info_to_validate, attributes, settings);
+}
+
+ITensorInfo *GpuMatMul::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *lhs,
+ ITensorInfo *rhs,
+ const Attributes &attributes,
+ const Settings &settings)
+{
+ ARM_COMPUTE_LOG_PARAMS(lhs, rhs, attributes, settings);
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+
+ // Assert validation
+ ARM_COMPUTE_ERROR_THROW_ON(GpuMatMul::validate_op(sketch, lhs, rhs, attributes, settings));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
+
+ // Auto initialize dst tensor
+ calculate_and_init_dst_if_empty(dst, lhs, rhs, attributes, settings);
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+ const auto sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, lhs);
+ arguments.add_const_tensor(ACL_SRC_1, rhs);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentMatMul>(properties, arguments, attributes, settings);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, lhs);
+ tensors.add_const_tensor(ACL_SRC_1, rhs);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
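
Unlike the convolution operators, GpuMatMul exposes its kernel tile sizes directly through GpuMatMulSettings (the m0/n0/k0 accessors defined above) instead of deriving them from heuristics. A brief sketch; the adj_lhs()/adj_rhs() setters on MatMulAttributes are assumed to mirror the getters used in calculate_and_init_dst_if_empty():

MatMulAttributes matmul_attr{};
matmul_attr.adj_lhs(false).adj_rhs(false); // assumed setters; neither operand is transposed

GpuMatMulSettings matmul_settings{};
matmul_settings.m0(4).n0(4).k0(4); // tile sizes, chained exactly as the setters above allow

ARM_COMPUTE_ERROR_THROW_ON(GpuMatMul::validate_op(sketch, lhs, rhs, matmul_attr, matmul_settings));
ITensorInfo *mm_out = GpuMatMul::create_op(sketch, lhs, rhs, matmul_attr, matmul_settings);
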
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuMul.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuMul.cpp
new file mode 100644
index 0000000000..b871171e8d
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuMul.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMul.h"
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+
+#include "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+Status GpuMul::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *lhs, const ITensorInfo *rhs)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");
+
+ // Set the elementwise operation to Mul then call the elementwise common validate_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Mul);
+ return GpuElementwiseBinaryCommon::validate_op(sketch, lhs, rhs, common_attributes);
+}
+
+Status GpuMul::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *lhs, const ITensorInfo *rhs)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");
+
+ // Set the elementwise operation to Mul then call the elementwise common is_supported_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Mul);
+ return GpuElementwiseBinaryCommon::is_supported_op(context, lhs, rhs, common_attributes);
+}
+
+ITensorInfo *GpuMul::create_op(GpuWorkloadSketch &sketch, ITensorInfo *lhs, ITensorInfo *rhs)
+{
+ // Set the elementwise operation to Mul then call the elementwise common create_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Mul);
+ return GpuElementwiseBinaryCommon::create_op(sketch, lhs, rhs, common_attributes);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
new file mode 100644
index 0000000000..f0d368d757
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/dynamic_fusion/utils/Utils.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+constexpr GpuOperatorType operator_type = GpuOperatorType::Simple;
+} // namespace
+
+Status GpuOutput::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *src, const ITensorInfo *dst)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+
+ // Initialize the destination tensor info.
+ TensorInfo dst_to_validate = *dst;
+ auto_init_if_empty(dst_to_validate, *src);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, &dst_to_validate);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, &dst_to_validate);
+
+ ARM_COMPUTE_UNUSED(context);
+ return Status{};
+}
+
+Status GpuOutput::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *src, const ITensorInfo *dst)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+ ARM_COMPUTE_RETURN_ERROR_ON(!is_alloc_tensor(dst));
+
+ // Initialize the destination tensor info.
+ TensorInfo dst_to_validate = *dst;
+ auto_init_if_empty(dst_to_validate, *src);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, &dst_to_validate);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, &dst_to_validate);
+
+ // Perform fusion test.
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_to_validate);
+
+ const auto group = sketch.implementation().operator_group();
+ const auto op = group.new_operator(operator_type, tensors);
+ const auto success = group.try_add_operator(op, true);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!success, "This operator cannot be fused into the workload.");
+
+ const auto status = is_supported_op(*sketch.gpu_context(), src, dst);
+ return status;
+}
+
+void GpuOutput::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src, ITensorInfo *dst)
+{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuOutput::validate_op(sketch, src, dst));
+
+ // Auto initialize dst tensor info if empty
+ auto_init_if_empty(*dst, *src);
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+ const auto sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON(sketch_ctx->cl_compile_context() == nullptr);
+
+ // Add store component
+ {
+ IGpuKernelComponent::Properties properties;
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentStore>(properties, arguments);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op, true);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp
new file mode 100644
index 0000000000..2d04f75610
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h"
+
+#include "arm_compute/core/CL/CLCompileContext.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSourceCode.h"
+#include "src/dynamic_fusion/utils/Utils.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+void calculate_and_init_dst_if_empty(ITensorInfo *dst,
+ const ITensorInfo *src,
+ const Pool2dAttributes &attributes,
+ const GpuPool2dSettings &settings)
+{
+ ARM_COMPUTE_UNUSED(settings);
+
+ if (dst->total_size() == 0U)
+ {
+ auto shape = misc::shape_calculator::compute_pool_shape(
+ *src, convert_pool_attr_to_pool_info(attributes, /* mixed_precision */ true));
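+        // Illustrative example (hypothetical values): an NHWC src of shape (C, W, H, N) = (16, 32, 32, 1)
+        // with a 2x2 pool, stride 2 and no padding gives an output shape of (16, 16, 16, 1).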
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(shape));
+ }
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+GpuPool2dSettings GpuPool2dSettings::use_inf_as_limit(bool use_inf_as_limit)
+{
+ _use_inf_as_limit = use_inf_as_limit;
+ return *this;
+}
+
+bool GpuPool2dSettings::use_inf_as_limit() const
+{
+ return _use_inf_as_limit;
+}
+
+Status GpuPool2d::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const Pool2dAttributes &attributes,
+ const GpuPool2dSettings &settings)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate;
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes, settings);
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op(*sketch.gpu_context(), src, attributes, settings);
+}
+
+Status GpuPool2d::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const Pool2dAttributes &attributes,
+ const GpuPool2dSettings &settings)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ // Data type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ // Data layout
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
+    // Check that exclude padding is enabled
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!attributes.exclude_padding(),
+ "Exclude padding must be set to true in Attributes!");
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate;
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes, settings);
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+
+ // Validate Component
+ {
+ const KernelProperties properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentPool2d::validate(properties, arguments, attributes, settings));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+ return Status{};
+}
+
+ITensorInfo *GpuPool2d::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ const Pool2dAttributes &attributes,
+ const GpuPool2dSettings &settings)
+{
+ // Assert validation
+ ARM_COMPUTE_ERROR_THROW_ON(GpuPool2d::validate_op(sketch, src, attributes, settings));
+ ARM_COMPUTE_LOG_PARAMS(src, attributes, settings);
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ // Auto initialize dst tensor
+ calculate_and_init_dst_if_empty(dst, src, attributes, settings);
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+
+ const auto sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = sketch_ctx->cl_compile_context();
+ ARM_COMPUTE_UNUSED(cl_compile_ctx);
+ ARM_COMPUTE_ERROR_ON(cl_compile_ctx == nullptr);
+
+ // Add Component
+ {
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentPool2d>(properties, arguments, attributes, settings);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_tensor(ACL_DST_0, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuReshape.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuReshape.cpp
new file mode 100644
index 0000000000..0e1f16e8ff
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuReshape.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h"
+
+#include "arm_compute/core/Error.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentReshape.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const ReshapeAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_tensor_shape(attributes.shape()));
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+
+ // Validate GpuReshape Component
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentReshape::validate(arguments));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{ErrorCode::RUNTIME_ERROR, "GpuReshape is not Supported"};
+}
+
+GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+Status
+GpuReshape::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *src, const Attributes &attributes)
+{
+ return is_supported_op_helper(context, src, nullptr, attributes);
+}
+
+Status GpuReshape::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *src, const Attributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_tensor_shape(attributes.shape()));
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+ITensorInfo *GpuReshape::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src, const Attributes &attributes)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_LOG_PARAMS(src, attributes.shape());
+ ARM_COMPUTE_ERROR_THROW_ON(GpuReshape::validate_op(sketch, src, attributes));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(attributes.shape()));
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+ const auto sketch_ctx = sketch.implementation().context();
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = sketch_ctx->cl_compile_context();
+ ARM_COMPUTE_UNUSED(cl_compile_ctx);
+ ARM_COMPUTE_ERROR_ON(cl_compile_ctx == nullptr);
+
+        // Add Reshape Component
+ {
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentReshape>(properties, arguments);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_tensor(ACL_DST_0, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp
new file mode 100644
index 0000000000..8e794c88b2
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+void calculate_and_init_dst_if_empty(ITensorInfo *dst, const ITensorInfo *src, const ResizeAttributes &attributes)
+{
+ if (dst->total_size() == 0U)
+ {
+ TensorShape out_shape = src->tensor_shape();
+
+ out_shape.set(1, attributes.output_width());
+ out_shape.set(2, attributes.output_height());
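+        // In the NHWC layout required by this operator, dimension 1 is the width and dimension 2 is
+        // the height; channels (dimension 0) and batches (dimension 3) are left unchanged.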
+
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(out_shape));
+ }
+}
+
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const ResizeAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes);
+
+ // Check support level
+ // Data type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ // Data layout
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
+ // Interpolation policy
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(attributes.interpolation_policy() != InterpolationPolicy::NEAREST_NEIGHBOR &&
+ attributes.interpolation_policy() != InterpolationPolicy::BILINEAR,
+ "Interpolation policy must be NEAREST_NEIGHBOR or BILINEAR");
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+
+        // Validate Resize Component
+ {
+ const KernelProperties properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentResize::validate(properties, arguments, attributes));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{};
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+Status
+GpuResize::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *src, const Attributes &attributes)
+{
+ return is_supported_op_helper(context, src, nullptr, attributes);
+}
+
+Status
+GpuResize::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *src, const GpuResize::Attributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes);
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+ITensorInfo *GpuResize::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src, const GpuResize::Attributes &attributes)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_LOG_PARAMS(src, attributes);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuResize::validate_op(sketch, src, attributes));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ // Auto initialize dst tensor info if empty
+ calculate_and_init_dst_if_empty(dst, src, attributes);
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+ const auto *sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(sketch_ctx->cl_compile_context());
+
+ // Add Resize Component
+ {
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentResize>(properties, arguments, attributes);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuSigmoid.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuSigmoid.cpp
new file mode 100644
index 0000000000..a2260c8c36
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuSigmoid.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSigmoid.h"
+
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentActivation.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+Status is_supported_op_helper(const GpuWorkloadContext &context, const ITensorInfo *src, const ITensorInfo *dst)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+
+ const ClComponentActivation::Attributes act_info{ActivationLayerInfo::ActivationFunction::LOGISTIC};
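+    // LOGISTIC corresponds to the sigmoid function f(x) = 1 / (1 + exp(-x)).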
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ // Validate Activation Component
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC, src);
+ arguments.add_const_tensor(ACL_DST, dst_info_to_validate_ptr);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentActivation::validate(properties, arguments, act_info));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+ return Status{};
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Simple;
+} // namespace
+
+Status GpuSigmoid::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *src)
+{
+ return is_supported_op_helper(context, src, nullptr);
+}
+
+Status GpuSigmoid::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *src)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+
+    // Check if the tensor has a valid id, i.e. it is created from a sketch
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+
+ // Perform fusion test to check if the operator meets fusion constraints
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC, src);
+ tensors.add_const_tensor(ACL_DST, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, &dst_info_to_validate);
+}
+
+ITensorInfo *GpuSigmoid::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_LOG_PARAMS(src);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuSigmoid::validate_op(sketch, src));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ // Auto initialize dst tensor
+ auto_init_if_empty(*dst, *src->clone());
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+
+ const ClComponentActivation::Attributes act_info{ActivationLayerInfo::ActivationFunction::LOGISTIC};
+
+ const auto *const sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ // Add Activation Component
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC, src);
+ arguments.add_const_tensor(ACL_DST, dst);
+ comp_graph.add_new_component<ClComponentActivation>(properties, arguments, act_info);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC, src);
+ tensors.add_const_tensor(ACL_DST, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp
new file mode 100644
index 0000000000..d385752201
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h"
+
+#include "arm_compute/core/Error.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuOperatorProperties.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+GpuOperatorType operator_type = GpuOperatorType::Unfusable;
+} // namespace
+
+Status GpuSoftmax::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const Attributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ if (dst != nullptr)
+ {
+ dst_info_to_validate = *dst;
+ }
+ else
+ {
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+ }
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+ const KernelProperties properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ TensorShape logits_sum_shape = src->tensor_shape();
+ TensorInfo logits(src->clone()->set_tensor_shape(logits_sum_shape));
+
+        // The sum tensor only needs one element along dim0
+ logits_sum_shape.set(0, 1);
+ TensorInfo sum(src->clone()->set_tensor_shape(logits_sum_shape));
+
+ // Validate Component
+ ArgumentPack<ITensorInfo> arguments_exp_sum;
+ ArgumentPack<ITensorInfo> arguments_norm;
+
+ arguments_exp_sum.add_const_tensor(ACL_SRC_0, src);
+ arguments_exp_sum.add_const_tensor(ACL_DST_0, &sum);
+ arguments_exp_sum.add_const_tensor(ACL_DST_1, &logits);
+
+ arguments_norm.add_const_tensor(ACL_SRC_0, &logits);
+ arguments_norm.add_const_tensor(ACL_SRC_1, &sum);
+ arguments_norm.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+
+ ARM_COMPUTE_UNUSED(properties, attributes);
+ return Status(ErrorCode::RUNTIME_ERROR, "GpuSoftmax is not implemented");
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{ErrorCode::RUNTIME_ERROR, "GpuSoftmax is not Supported"};
+}
+
+Status GpuSoftmax::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const Attributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !dst->has_valid_id());
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->num_dimensions() > 4, "Only up to 4 dimensions are supported");
+ ARM_COMPUTE_RETURN_ERROR_ON(attributes.axis() < static_cast<int32_t>(-src->num_dimensions()) ||
+ static_cast<int32_t>(src->num_dimensions()) <= attributes.axis());
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate = *dst;
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+
+ const size_t actual_axis =
+ static_cast<size_t>(wrap_around(attributes.axis(), static_cast<int32_t>(src->num_dimensions())));
+ const bool needs_permute = actual_axis != 0;
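+    // e.g. for a 4D tensor, axis = -1 wraps around to actual_axis = 3, which would require a permute
+    // and is therefore rejected below.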
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(needs_permute, "Dynamic fusion softmax on axis!=0 not supported yet.");
+
+ // Perform fusion test and check if the operator meets the fusion constraints
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+void GpuSoftmax::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src, ITensorInfo *dst, const Attributes &attributes)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_LOG_PARAMS(src, dst, attributes);
+ TensorShape logits_sum_shape = src->tensor_shape();
+ ITensorInfo *logits = sketch.implementation().create_auxiliary_tensor(
+ src->clone()->set_id(ITensorInfo::invalid_tensor_id).set_tensor_shape(logits_sum_shape));
+ logits_sum_shape.set(0, 1);
+ ITensorInfo *sum = sketch.implementation().create_auxiliary_tensor(
+ src->clone()->set_id(ITensorInfo::invalid_tensor_id).set_tensor_shape(logits_sum_shape));
+
+    // Auto initialize dst tensor info (the auxiliary tensor infos are already initialized from src above)
+ auto_init_if_empty(*dst, *src->clone());
+
+ // Assert validation
+ ARM_COMPUTE_ERROR_THROW_ON(GpuSoftmax::validate_op(sketch, src, dst, attributes));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(logits, sum);
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+ const auto sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = sketch_ctx->cl_compile_context();
+ ARM_COMPUTE_UNUSED(cl_compile_ctx);
+ ARM_COMPUTE_ERROR_ON(cl_compile_ctx == nullptr);
+
+        // Add Softmax Components
+ {
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments_exp_sum;
+ ArgumentPack<ITensorInfo> arguments_norm;
+
+ arguments_exp_sum.add_const_tensor(ACL_SRC_0, src);
+ arguments_exp_sum.add_const_tensor(ACL_DST_0, sum);
+ arguments_exp_sum.add_const_tensor(ACL_DST_1, logits);
+
+ arguments_norm.add_const_tensor(ACL_SRC_0, logits);
+ arguments_norm.add_const_tensor(ACL_SRC_1, sum);
+ arguments_norm.add_const_tensor(ACL_DST_0, dst);
+
+ // Add to component graph -- NOT IMPLEMENTED
+ ARM_COMPUTE_UNUSED(comp_graph, attributes);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuSub.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuSub.cpp
new file mode 100644
index 0000000000..c53453a15c
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuSub.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h"
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+
+#include "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+Status GpuSub::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *lhs, const ITensorInfo *rhs)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");
+
+ // Set the elementwise operation to Sub then call the elementwise common validate_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Sub);
+ return GpuElementwiseBinaryCommon::validate_op(sketch, lhs, rhs, common_attributes);
+}
+
+Status GpuSub::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *lhs, const ITensorInfo *rhs)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");
+
+ // Set the elementwise operation to Sub then call the elementwise common is_supported_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Sub);
+ return GpuElementwiseBinaryCommon::is_supported_op(context, lhs, rhs, common_attributes);
+}
+
+ITensorInfo *GpuSub::create_op(GpuWorkloadSketch &sketch, ITensorInfo *lhs, ITensorInfo *rhs)
+{
+ // No need to log or validate as they'll be handled inside GpuElementwiseBinaryCommon::create_op()
+ // Set the elementwise operation to Sub then call the elementwise common create_op
+ ElementwiseBinaryCommonAttributes common_attributes{};
+ common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Sub);
+ return GpuElementwiseBinaryCommon::create_op(sketch, lhs, rhs, common_attributes);
+}
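+
+// Minimal usage sketch (illustrative only): assuming a GpuWorkloadSketch `sketch` and tensor infos
+// `lhs`, `rhs` and `dst` that were created through the sketch's context (so they carry valid ids),
+// a caller would typically validate, create, and then materialise the virtual result via GpuOutput:
+//
+//   if (GpuSub::validate_op(sketch, lhs, rhs).error_code() == ErrorCode::OK)
+//   {
+//       ITensorInfo *ans = GpuSub::create_op(sketch, lhs, rhs); // returns a virtual (intermediate) tensor
+//       GpuOutput::create_op(sketch, ans, dst);                 // stores the result into the user tensor
+//   }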
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuTanh.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuTanh.cpp
new file mode 100644
index 0000000000..b9d01966b3
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuTanh.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuTanh.h"
+
+#include "arm_compute/core/experimental/Types.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentActivation.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+Status is_supported_op_helper(const GpuWorkloadContext &context, const ITensorInfo *src, const ITensorInfo *dst)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+
+ const ClComponentActivation::Attributes act_info{ActivationLayerInfo::ActivationFunction::TANH};
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ // Validate Activation Component
+ const auto properties =
+ IGpuKernelComponent::Properties().stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC, src);
+ arguments.add_const_tensor(ACL_DST, dst_info_to_validate_ptr);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentActivation::validate(properties, arguments, act_info));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+ return Status{};
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Simple;
+} // namespace
+
+Status GpuTanh::is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *src)
+{
+ return is_supported_op_helper(context, src, nullptr);
+}
+
+Status GpuTanh::validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *src)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+
+    // Check if the tensor has a valid id, i.e. it is created from a sketch
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ auto_init_if_empty(dst_info_to_validate, *src->clone());
+
+ // Perform fusion test to check if the operator meets fusion constraints
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC, src);
+ tensors.add_const_tensor(ACL_DST, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), src, &dst_info_to_validate);
+}
+
+ITensorInfo *GpuTanh::create_op(GpuWorkloadSketch &sketch, ITensorInfo *src)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_LOG_PARAMS(src);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuTanh::validate_op(sketch, src));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ // Auto initialize dst tensor
+ auto_init_if_empty(*dst, *src->clone());
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+
+ const ClComponentActivation::Attributes act_info{ActivationLayerInfo::ActivationFunction::TANH};
+
+ const auto *const sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ // Add Activation Component
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC, src);
+ arguments.add_const_tensor(ACL_DST, dst);
+ comp_graph.add_new_component<ClComponentActivation>(properties, arguments, act_info);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC, src);
+ tensors.add_const_tensor(ACL_DST, dst);
+
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp b/src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp
new file mode 100644
index 0000000000..d79a4c42c9
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+void calculate_and_init_dst_if_empty(ITensorInfo *dst, const ITensorInfo *lhs, const ITensorInfo *rhs)
+{
+ if (dst->total_size() == 0U)
+ {
+ const std::pair<TensorShape, ValidRegion> broadcast_pair =
+ ITensorInfo::broadcast_shape_and_valid_region(*lhs, *rhs);
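+        // e.g. (hypothetical shapes) an lhs of (16, 32, 32, 1) and an rhs of (16, 1, 1, 1) broadcast
+        // to a dst shape of (16, 32, 32, 1).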
+ auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(broadcast_pair.first));
+ }
+}
+
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const ITensorInfo *dst,
+ const ElementwiseBinaryCommonAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+ if (dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, lhs, rhs);
+
+ // Check components
+ if (context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+
+ // Validate ElementwiseBinary Component
+ {
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, lhs);
+ arguments.add_const_tensor(ACL_SRC_1, rhs);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentElementwiseBinary::validate(arguments, attributes));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{};
+}
+
+GpuOperatorType operator_type = GpuOperatorType::Simple;
+} // namespace
+
+ElementwiseBinaryCommonAttributes &
+ElementwiseBinaryCommonAttributes::operation(const ElementwiseBinaryCommonAttributes::ElementwiseOp &operation)
+{
+ _operation = operation;
+ return *this;
+}
+
+ElementwiseBinaryCommonAttributes::ElementwiseOp ElementwiseBinaryCommonAttributes::operation() const
+{
+ return _operation;
+}
+
+Status GpuElementwiseBinaryCommon::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const ElementwiseBinaryCommonAttributes &attributes)
+{
+ return is_supported_op_helper(context, lhs, rhs, nullptr, attributes);
+}
+
+Status GpuElementwiseBinaryCommon::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const ElementwiseBinaryCommonAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON(!lhs->has_valid_id() || !rhs->has_valid_id());
+
+ // Refer to GpuConv2d::validate_op() for id-validness of this TensorInfo object
+ TensorInfo dst_info_to_validate;
+
+ // Auto initialize dst tensor info
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, lhs, rhs);
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, lhs);
+ tensors.add_const_tensor(ACL_SRC_1, rhs);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op_helper(*sketch.gpu_context(), lhs, rhs, &dst_info_to_validate, attributes);
+}
+
+ITensorInfo *GpuElementwiseBinaryCommon::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *lhs,
+ ITensorInfo *rhs,
+ const ElementwiseBinaryCommonAttributes &attributes)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs);
+ ARM_COMPUTE_LOG_PARAMS(lhs, rhs);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuElementwiseBinaryCommon::validate_op(sketch, lhs, rhs, attributes));
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
+
+ // Auto initialize dst tensor
+ calculate_and_init_dst_if_empty(dst, lhs, rhs);
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+
+ const auto sketch_ctx = sketch.implementation().context();
+
+ if (sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(sketch_ctx->cl_compile_context());
+
+ // Add ElementwiseBinary Component
+ {
+ auto properties = IGpuKernelComponent::Properties();
+ properties.stage(UnitWorkloadStage{UnitWorkloadStage::Stage::Run});
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, lhs);
+ arguments.add_const_tensor(ACL_SRC_1, rhs);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentElementwiseBinary>(properties, arguments, attributes);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, lhs);
+ tensors.add_const_tensor(ACL_SRC_1, rhs);
+ tensors.add_tensor(ACL_DST_0, dst);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h b/src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h
new file mode 100644
index 0000000000..0b58b6eb96
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_INTERNAL_GPUELEMENTWISEBINARYCOMMON
+#define SRC_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_INTERNAL_GPUELEMENTWISEBINARYCOMMON
+
+#include "arm_compute/core/Error.h"
+
+namespace arm_compute
+{
+/** Forward declaration */
+class ITensorInfo;
+
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class ElementwiseBinaryCommonAttributes
+{
+public:
+ enum class ElementwiseOp
+ {
+ Add, /**< (x + y) */
+ Sub, /**< (x - y) */
+ Div, /**< (x / y) */
+ Mul, /**< (x * y) */
+ Min, /**< Min(x, y) */
+ Max, /**< Max(x, y) */
+ SquaredDiff, /**< (x - y)^2 */
+ Power, /**< x ^ y */
+ Prelu, /**< y*x if x < 0, x otherwise */
+ };
+    /** Set operation */
+ ElementwiseBinaryCommonAttributes &operation(const ElementwiseBinaryCommonAttributes::ElementwiseOp &operation);
+    /** Get operation */
+ ElementwiseOp operation() const;
+
+private:
+ ElementwiseOp _operation; /**< Elementwise operation */
+};
+
+/** Forward declaration */
+class GpuWorkloadContext;
+class GpuWorkloadSketch;
+
+/** Operator interface. */
+class GpuElementwiseBinaryCommon final
+{
+public:
+ /** Create an operator and fuse it into the workload sketch.
+ * @note If @ref validate_op() fails, the creation also fails and may throw an error.
+ * @note If @ref validate_op() fails, @p sketch remains unchanged and valid.
+ *
+ * Valid data type configurations are checked at the operator level i.e. GpuAdd::validate_op(), GpuSub::validate_op(), ... etc.
+ *
+ * Valid data layouts:
+ * - Any
+ *
+ * @param[in,out] sketch Workload sketch into which the operator will be fused
+ * @param[in] lhs Left hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
+ * @param[in] rhs Right hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
+ * @param[in] attributes ElementwiseBinaryCommonAttributes containing the operator type: ADD, SUB, DIV, ... etc.
+ *
+ * @return Pointer for the destination tensor info
+ */
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *lhs,
+ ITensorInfo *rhs,
+ const ElementwiseBinaryCommonAttributes &attributes);
+ /** Check if the operator configuration is supported, irrespective of fusion
+ *
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] lhs Left hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
+ * @param[in] rhs Right hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
+ * @param[in] attributes ElementwiseBinaryCommonAttributes containing the operator type: ADD, SUB, DIV, ... etc.
+ *
+ * @return Status
+ */
+ static Status is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *lhs,
+ const ITensorInfo *rhs,
+ const ElementwiseBinaryCommonAttributes &attributes);
+ /** Validate the operator and check if it can be fused into the workload sketch.
+ *
+ * Parameters are similar to @ref GpuElementwiseBinaryCommon::create_op()
+ *
+ * @return Status
+ */
+ static Status validate_op(const GpuWorkloadSketch &sketch,
+                              const ITensorInfo                       *lhs,
+                              const ITensorInfo                       *rhs,
+ const ElementwiseBinaryCommonAttributes &attributes);
+};
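+
+// Typical front-end usage (a sketch, following the pattern shown in GpuSub.cpp above): the
+// operator-specific front-end sets the elementwise operation on the attributes and forwards to the
+// common implementation, e.g. for a division operator:
+//
+//   ElementwiseBinaryCommonAttributes attributes{};
+//   attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Div);
+//   return GpuElementwiseBinaryCommon::validate_op(sketch, lhs, rhs, attributes);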
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_INTERNAL_GPUELEMENTWISEBINARYCOMMON */