author     Gunes Bayir <gunes.bayir@arm.com>  2022-12-06 20:48:31 +0000
committer  Gunes Bayir <gunes.bayir@arm.com>  2022-12-09 14:05:08 +0000
commit     1dc6ff1749ae8e9fbf0b99308d6ebb65be10cc76 (patch)
tree       79588a3305127bf43dcf64e0b55a32dbac96c437 /src/dynamic_fusion
parent     df6a3b05842a98702437347ca269138ccd55f852 (diff)
download   ComputeLibrary-1dc6ff1749ae8e9fbf0b99308d6ebb65be10cc76.tar.gz
Implement Cast operator in dynamic fusion

The operator is migrated into dynamic fusion for all supported data types.

Resolves: COMPMID-5693
Change-Id: I3c550d3d1cd04570f453beae678c3f60d4cb1a73
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8755
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/dynamic_fusion')
-rw-r--r--  src/dynamic_fusion/sketch/attributes/CastAttributes.cpp              |  56
-rw-r--r--  src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp      |  82
-rw-r--r--  src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h        | 133
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp                  | 167
-rw-r--r--  src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp  | 201
-rw-r--r--  src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h    | 103
6 files changed, 742 insertions, 0 deletions
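
Editor's note: the sketch below illustrates how the new GpuCast operator is intended to be driven from a dynamic fusion workload sketch. It is not part of this patch; the context/sketch set-up, the header paths for GpuWorkloadContext/GpuWorkloadSketch, and the create_tensor_info() helper are assumptions about the surrounding dynamic fusion API, while GpuCast and CastAttributes come from the files added here.

// Illustrative sketch only (not part of this patch). Context/sketch set-up and
// create_tensor_info() are assumed from the surrounding dynamic fusion API.
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

void run_cast_example()
{
    // Workload context backed by the OpenCL compile context.
    auto              &cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    GpuWorkloadContext context{ &cl_compile_ctx };
    GpuWorkloadSketch  sketch{ &context };

    // Source tensor info; the destination is left empty and is auto-initialized
    // by GpuCast::create_op() with the target data type.
    TensorInfo src_info = sketch.create_tensor_info(TensorShape(32U, 16U), 1, DataType::F32);
    TensorInfo dst_info = sketch.create_tensor_info();

    const CastAttributes attributes = CastAttributes()
                                          .data_type(DataType::F16)
                                          .convert_policy(ConvertPolicy::SATURATE);

    // Validate first, then record the operator in the sketch.
    if(GpuCast::validate_op(sketch, &src_info, &dst_info, attributes))
    {
        GpuCast::create_op(sketch, &src_info, &dst_info, attributes);
    }
}
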
diff --git a/src/dynamic_fusion/sketch/attributes/CastAttributes.cpp b/src/dynamic_fusion/sketch/attributes/CastAttributes.cpp
new file mode 100644
index 0000000000..4ad94268f4
--- /dev/null
+++ b/src/dynamic_fusion/sketch/attributes/CastAttributes.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+CastAttributes &CastAttributes::data_type(const DataType &data_type)
+{
+ _data_type = data_type;
+ return *this;
+}
+
+DataType CastAttributes::data_type() const
+{
+ return _data_type;
+}
+
+CastAttributes &CastAttributes::convert_policy(const ConvertPolicy &convert_policy)
+{
+ _convert_policy = convert_policy;
+ return *this;
+}
+
+ConvertPolicy CastAttributes::convert_policy() const
+{
+ return _convert_policy;
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
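
Editor's note: the setters above each return *this, so a CastAttributes object can be configured in a single chain. A minimal usage sketch, assuming the class is default-constructible (its members are expected to carry default values in the header):

// Minimal sketch of the fluent interface defined above; default construction
// of CastAttributes is an assumption.
#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

const CastAttributes cast_attr = CastAttributes()
                                     .data_type(DataType::S32)                 // target element type of the cast
                                     .convert_policy(ConvertPolicy::SATURATE); // clamp values that overflow the target type
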
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp
new file mode 100644
index 0000000000..007ba6380c
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "ClComponentCast.h"
+
+#include "arm_compute/core/Error.h"
+#include "src/core/CL/CLValidate.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+Status ClComponentCast::validate(
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings)
+{
+ ARM_COMPUTE_UNUSED(properties, attributes, settings);
+
+ const ITensorInfo *src = tensors.get_const_tensor(TensorType::ACL_SRC_0);
+ const ITensorInfo *dst = tensors.get_const_tensor(TensorType::ACL_DST_0);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == attributes.data_type(), "input and target data types should be different");
+
+ // Validate in case of configured dst
+ if(dst->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() != attributes.data_type(), "dst and target data types should be the same");
+ }
+
+ return Status{};
+}
+ClComponentCast::ClComponentCast(ComponentId id,
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings)
+ : IGpuKernelComponent{ id, properties, tensors },
+ _component_writer{ std::make_unique<ClTemplateCast>(id, tensors, attributes) }
+{
+ ARM_COMPUTE_UNUSED(attributes, settings);
+}
+ClComponentCast::~ClComponentCast()
+{
+}
+const IGpuTemplateComponentWriter *ClComponentCast::template_writer() const
+{
+ return _component_writer.get();
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h
new file mode 100644
index 0000000000..d0f75b1062
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTCAST
+#define SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTCAST
+
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
+#include "src/dynamic_fusion/sketch/gpu/components/IGpuKernelComponent.h"
+
+namespace arm_compute
+{
+/** Forward declaration */
+class ITensorInfo;
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Forward declaration */
+template <typename T>
+class ArgumentPack;
+
+/** Component specific settings
+ */
+class ClComponentCastSettings
+{
+public:
+private:
+};
+
+/** Forward declaration */
+class ClTemplateCast;
+
+class ClComponentCast final : public IGpuKernelComponent
+{
+public:
+ /** Attributes are a set of backend-agnostic parameters that define what a component does */
+ using Attributes = CastAttributes;
+ /** Settings are a set of backend-specific parameters that influence the implementation of a component */
+ using Settings = ClComponentCastSettings;
+
+ /** Validate the component
+ *
+ * @param[in] properties Component properties @ref Properties
+ * @param[in,out] tensors Tensor arguments to the component
+ * @param[in] attributes Component attributes @ref Attributes
+ * @param[in] settings Component settings @ref Settings
+ *
+ * @return Status Validation results
+ *
+ * Tensor argument names:
+ * - ACL_SRC_0: Input
+ * - ACL_DST_0: Output
+ *
+ * Tensor argument constness:
+ * - ACL_SRC_0: Const
+ * - ACL_DST_0: Const
+ *
+ * Valid data layouts:
+ * - All
+ *
+ * Valid data type configurations:
+ * |ACL_SRC_0 |ACL_DST_0 |
+ * |:--------------|:--------------------------------------|
+ * |U8 | S8, U16, S16, U32, S32, F16, F32 |
+ * |U16 | U8, S8, S16, U32, S32, F16, F32 |
+ * |S16 | U8, S8, U16, U32, S32, F16, F32 |
+ * |U32 | U8, S8, U16, S16, S32, F16, F32 |
+ * |S32 | U8, S8, U16, S16, U32, F16, F32 |
+ * |F16 | U8, S8, U16, S16, U32, S32, F32 |
+ * |F32 | U8, S8, U16, S16, U32, S32, F16 |
+ */
+ static Status validate(
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings);
+
+ /** Constructor
+ *
+ * Similar to @ref ClComponentCast::validate()
+ */
+ ClComponentCast(ComponentId id,
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings);
+
+ /** Destructor */
+ ~ClComponentCast() override;
+ /** Prevent instances of this class from being copy constructed */
+ ClComponentCast(const ClComponentCast &component) = delete;
+ /** Prevent instances of this class from being copied */
+ ClComponentCast &operator=(const ClComponentCast &component) = delete;
+ /** Allow instances of this class to be move constructed */
+ ClComponentCast(ClComponentCast &&component) = default;
+ /** Allow instances of this class to be moved */
+ ClComponentCast &operator=(ClComponentCast &&component) = default;
+ /** Get template writer for the component */
+ const IGpuTemplateComponentWriter *template_writer() const override;
+ /** Get component type */
+ GpuComponentType type() const override
+ {
+ return GpuComponentType::Complex;
+ }
+
+private:
+ std::unique_ptr<ClTemplateCast> _component_writer;
+};
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTCAST */
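
Editor's note: the validation contract documented in the header above (src and target data types must differ; an initialized dst must match the requested data type and the src shape) mirrors the call made by GpuCast::is_supported_op() later in this patch. The sketch below is illustrative only and assumes the internal ArgumentPack and Properties/Settings types are visible to the caller.

// Illustrative only: mirrors the validation performed inside
// GpuCast::is_supported_op() below. ArgumentPack, Properties and Settings are
// internal dynamic fusion types, shown here for clarity.
#include "arm_compute/core/TensorInfo.h"
#include "src/dynamic_fusion/sketch/ArgumentPack.h"
#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

Status validate_f32_to_f16_cast()
{
    const TensorInfo src_info(TensorShape(32U, 16U), 1, DataType::F32);
    const TensorInfo dst_info(TensorShape(32U, 16U), 1, DataType::F16);

    const auto properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
    const auto attributes = ClComponentCast::Attributes().data_type(DataType::F16);
    const auto settings   = ClComponentCast::Settings();

    ArgumentPack<ITensorInfo> arguments;
    arguments.add_const_tensor(ACL_SRC_0, &src_info);
    arguments.add_const_tensor(ACL_DST_0, &dst_info);

    // Fails if src already has the target data type, or if an initialized dst
    // disagrees with the requested data type or the src shape.
    return ClComponentCast::validate(properties, arguments, attributes, settings);
}
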
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp
new file mode 100644
index 0000000000..9e5e735c60
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h"
+
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h"
+
+#include "src/common/utils/Log.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+}
+Status GpuCast::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const CastAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate = *dst;
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_data_type(attributes.data_type()));
+
+ // Check support level
+ // Data Type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src,
+ 1,
+ DataType::U8, DataType::S8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
+ DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+ DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst_info_to_validate,
+ 1,
+ DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
+ DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+ DataType::F32);
+
+ if(context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+ // Validate Cast Component
+ {
+ const auto properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+ auto settings = ClComponentCast::Settings();
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentCast::validate(properties, arguments, attributes, settings));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{};
+}
+
+Status GpuCast::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const CastAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !dst->has_valid_id());
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate = *dst;
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_data_type(attributes.data_type()));
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+void GpuCast::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *dst,
+ const CastAttributes &attributes)
+{
+ // Assert validation
+ ARM_COMPUTE_ERROR_THROW_ON(GpuCast::validate_op(sketch, src, dst, attributes));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_LOG_PARAMS(src, dst, attributes);
+
+ // Auto initialize dst tensor info if empty
+ auto_init_if_empty(*dst, src->clone()->set_data_type(attributes.data_type()));
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+ const auto *sketch_ctx = sketch.implementation().context();
+
+ if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON(sketch_ctx->cl_compile_context() == nullptr);
+
+ // Add Cast Component
+ {
+ const auto properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+ auto settings = ClComponentCast::Settings();
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentCast>(properties, arguments, attributes, settings);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp
new file mode 100644
index 0000000000..1ac49406a8
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "ClTemplateCast.h"
+
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGroup.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ClTemplateCast::ClTemplateCast(ComponentId id, const ArgumentPack<ITensorInfo> &tensors, const Attributes &attributes)
+ : IGpuTemplateComponentWriter{ id, tensors }, _src{}, _dst{}, _attributes{ attributes }
+{
+ _src = this->tensors().get_const_tensor(TensorType::ACL_SRC_0);
+ _dst = this->tensors().get_const_tensor(TensorType::ACL_DST_0);
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+}
+
+std::string ClTemplateCast::get_name() const
+{
+ const size_t src_size = data_size_from_type(_src->data_type());
+ const size_t dst_size = data_size_from_type(_dst->data_type());
+
+ return (src_size >= dst_size) ? "cast_down" : "cast_up";
+}
+
+std::string ClTemplateCast::get_component_code(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ const std::string kernel_name = get_name();
+
+ std::string code = R"_(
+//------------------ START KERNEL {{meta_kernel_id}} ---------------------
+// IN_0(src) {{src}}
+// OUT(dst, accum) {{dst}}
+
+TILE({{DATA_TYPE_OUT}}, M0, N0, {{dst}});
+TILE(uint, M0, 1, g_dst_indirect_y);
+{
+ {{src}}_offset_first_element_in_bytes += get_global_id(2) * {{src}}_stride_z;
+
+ TILE({{DATA_TYPE_IN}}, M0, N0, in_data);
+ T_LOAD({{DATA_TYPE_IN}}, M0, N0, BUFFER, {{src}}, g_ind_0, g_ind_1, 1, {{src}}_stride_y, in_data);
+)_";
+
+ code += R"_(
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+)_";
+
+ if(kernel_name == "cast_down" && is_data_type_quantized(_src->data_type()))
+ {
+ code += R"_(
+ in_data[m0].v ^= (VEC_DATA_TYPE({{DATA_TYPE_IN}}, N0))0x80;
+)_";
+ }
+
+ if(kernel_name == "cast_down" && (is_data_type_float(_src->data_type()) || _attributes.convert_policy() == ConvertPolicy::SATURATE))
+ {
+ code += R"_(
+ {{dst}}[m0].v = CONVERT_SAT(in_data[m0].v, VEC_DATA_TYPE({{DATA_TYPE_OUT}}, N0));
+)_";
+ }
+ else
+ {
+ code += R"_(
+ {{dst}}[m0].v = CONVERT(in_data[m0].v, VEC_DATA_TYPE({{DATA_TYPE_OUT}}, N0));
+)_";
+ }
+
+ code += R"_(
+ })
+)_";
+
+ code += R"_(
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ g_dst_indirect_y[i].v = (uint)min((int)(g_ind_1 + i), (int)({{arg_dst}}_w) - 1);
+ g_dst_indirect_y[i].v += (int)(g_ind_2 % {{arg_dst}}_h) * (int)({{arg_dst}}_w);
+ g_dst_indirect_y[i].v += (int)(g_ind_2 / {{arg_dst}}_h) * (int)({{arg_dst}}_w * {{arg_dst}}_h);
+ })
+}
+//------------------ END KERNEL {{meta_kernel_id}} ---------------------
+)_";
+
+ return code;
+}
+
+void ClTemplateCast::declare_variables(GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const
+{
+ vtable.declare_variable(
+ _src,
+ GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer),
+ comp_group.is_intermediate_tensor(_src),
+ "src");
+
+ vtable.declare_variable(
+ _dst,
+ GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer),
+ comp_group.is_intermediate_tensor(_dst),
+ "dst");
+}
+
+TagLUT ClTemplateCast::get_tag_lut(const GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ TagLUT lut{};
+
+ // Arguments and global shared variables
+ lut["src"] = vtable.get_variable(_src);
+ lut["dst"] = vtable.get_variable(_dst);
+
+ const auto dst_argument = vtable.get_variable(comp_group.get_dst_tensors()[0]);
+ lut["arg_dst"] = dst_argument.uniq_name;
+
+ // Local build options
+ lut["meta_kernel_id"] = id();
+
+ lut["DATA_TYPE_IN"] = get_cl_type_from_data_type(_src->data_type());
+ lut["DATA_TYPE_OUT"] = get_cl_type_from_data_type(_dst->data_type());
+
+ return lut;
+}
+
+CLBuildOptions ClTemplateCast::get_build_options(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ const auto root_window = comp_group.get_root_component()->template_writer()->get_window();
+ const unsigned int n0 = root_window.x().step();
+ const unsigned int m0 = root_window.y().step();
+
+ // Set build options
+ CLBuildOptions build_opts{};
+ build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
+ build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(_src->dimension(0) % n0));
+ build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
+
+ return build_opts;
+}
+
+std::string ClTemplateCast::get_config_id() const
+{
+ std::string config_id{};
+
+ config_id += "_";
+ config_id += lower_string(string_from_data_type(_src->data_type()));
+ config_id += "_";
+ config_id += lower_string(string_from_data_type(_dst->data_type()));
+ config_id += "_";
+ config_id += support::cpp11::to_string(_src->dimension(0));
+ config_id += "_";
+ config_id += support::cpp11::to_string(_src->dimension(1));
+
+ return config_id;
+}
+
+std::set<std::string> ClTemplateCast::get_headers_list() const
+{
+ return std::set<std::string>{ "helpers.h", "tile_helpers.h" };
+}
+
+Window ClTemplateCast::get_window() const
+{
+ ARM_COMPUTE_ERROR_ON_MSG(_dst->tensor_shape().total_size() == 0U, "Destination tensor is not initialized");
+
+ const unsigned int n0 = adjust_vec_size(16 / _src->element_size(), _src->dimension(0));
+ Window win = calculate_max_window(*_dst, Steps(n0));
+ return win.collapse(win, Window::DimZ);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
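
Editor's note: the component code returned by get_component_code() above is a template whose {{...}} placeholders are resolved from the TagLUT built in get_tag_lut(). The real instantiation is done by the dynamic fusion template writer infrastructure, which is not part of this patch; the stand-in below only illustrates the substitution idea, with LUT values reduced to plain strings.

// Illustrative stand-in only: the real template writer resolves TagLUT values
// (tensor variables, component ids, CL type names) itself. This naive version
// just shows how entries such as "DATA_TYPE_OUT" or "dst" replace the {{...}}
// placeholders emitted by get_component_code().
#include <map>
#include <string>

std::string instantiate_template(std::string code, const std::map<std::string, std::string> &lut)
{
    for(const auto &entry : lut)
    {
        const std::string placeholder = "{{" + entry.first + "}}";
        for(std::size_t pos = code.find(placeholder); pos != std::string::npos; pos = code.find(placeholder, pos))
        {
            code.replace(pos, placeholder.size(), entry.second);
            pos += entry.second.size();
        }
    }
    return code;
}

// Example: instantiate_template("TILE({{DATA_TYPE_OUT}}, M0, N0, {{dst}});",
//                               { { "DATA_TYPE_OUT", "half" }, { "dst", "t1_dst" } })
// yields "TILE(half, M0, N0, t1_dst);".
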
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h
new file mode 100644
index 0000000000..08255ca5af
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATECAST
+#define SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATECAST
+
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/IGpuTemplateComponentWriter.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class ClTemplateCast final : public IGpuTemplateComponentWriter
+{
+public:
+ using Attributes = ClComponentCast::Attributes;
+
+ /** Constructor
+ *
+ * @param[in] id Component id
+ * @param[in] tensors Tensor arguments to the components
+ * @param[in] attributes Component attributes
+ */
+ ClTemplateCast(ComponentId id, const ArgumentPack<ITensorInfo> &tensors, const Attributes &attributes);
+ /** Prevent instances of this class from being copy constructed */
+ ClTemplateCast(const ClTemplateCast &cast) = delete;
+ /** Prevent instances of this class from being copied */
+ ClTemplateCast &operator=(const ClTemplateCast &cast) = delete;
+ /** Allow instances of this class to be move constructed */
+ ClTemplateCast(ClTemplateCast &&cast) = default;
+ /** Allow instances of this class to be moved */
+ ClTemplateCast &operator=(ClTemplateCast &&cast) = default;
+ /** Generate kernel component name */
+ std::string get_name() const override;
+ /** Generate kernel component code template
+ *
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return std::string Component code
+ */
+ std::string get_component_code(const ComponentGroup &comp_group) const override;
+ /** Declare all variables used by the component in the @p vtable
+ *
+ * @param[out] vtable Variable table
+ * @param[in] comp_group Component group of which the component is a part of
+ */
+ void declare_variables(GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override;
+ /** Generate the tag look-up table used to instantiate the component code.
+ *
+ * @param[in] vtable Variable table
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return TagLUT Tag lookup table
+ */
+ TagLUT get_tag_lut(const GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override;
+ /** Generate the build options used in the component
+ *
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return CLBuildOptions Build options
+ */
+ CLBuildOptions get_build_options(const ComponentGroup &comp_group) const override;
+ /** Generate the component config id string used for tuning */
+ std::string get_config_id() const override;
+ /** Generate the header list used in the component */
+ std::set<std::string> get_headers_list() const override;
+ /** Generate the execution window for the component */
+ Window get_window() const override;
+
+private:
+ const ITensorInfo *_src;
+ const ITensorInfo *_dst;
+ const Attributes _attributes;
+};
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATECAST */