 Android.bp                                                          |   4 +
 arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h       |  59 +
 arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h           |  99 +
 filelist.json                                                       |   6 +-
 src/dynamic_fusion/sketch/attributes/CastAttributes.cpp             |  56 +
 src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp     |  82 +
 src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h       | 133 +
 src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp                 | 167 +
 src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp | 201 +
 src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h   | 103 +
 tests/validation/dynamic_fusion/gpu/cl/Cast.cpp                     | 235 +
 tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h    | 175 +
 utils/TypePrinter.h                                                 |  30 +
 13 files changed, 1349 insertions(+), 1 deletion(-)
diff --git a/Android.bp b/Android.bp
index 8094c8a660..77ca59536b 100644
--- a/Android.bp
+++ b/Android.bp
@@ -588,6 +588,7 @@ cc_library_static {
"src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp",
"src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp",
"src/dynamic_fusion/sketch/OperatorAttributes.cpp",
+ "src/dynamic_fusion/sketch/attributes/CastAttributes.cpp",
"src/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelArgument.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp",
@@ -597,15 +598,18 @@ cc_library_static {
"src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp",
"src/dynamic_fusion/sketch/gpu/GpuWorkloadContext.cpp",
"src/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.cpp",
+ "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp",
+ "src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp",
+ "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp",
diff --git a/arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h b/arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h
new file mode 100644
index 0000000000..59efc8bd5d
--- /dev/null
+++ b/arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_ATTRIBUTES_CASTATTRIBUTES
+#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_ATTRIBUTES_CASTATTRIBUTES
+
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Attributes are backend-agnostic parameters (in addition to the input/output tensors) of an operator.
+ */
+
+/** Cast attributes */
+class CastAttributes
+{
+public:
+ /** Set the data type to cast to */
+ CastAttributes &data_type(const DataType &data_type);
+ /** Get the data type to cast to */
+ DataType data_type() const;
+ /** Set Policy */
+ CastAttributes &convert_policy(const ConvertPolicy &policy);
+ /** Get Policy */
+ ConvertPolicy convert_policy() const;
+
+private:
+ DataType _data_type{}; /**< Data type to cast to */
+ ConvertPolicy _convert_policy{ ConvertPolicy::SATURATE }; /**< Convert Policy */
+};
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_ATTRIBUTES_CASTATTRIBUTES */
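The setters above follow a fluent builder pattern: each returns `*this`, so attribute construction chains. A minimal sketch of the intended usage (the helper function is hypothetical; `SATURATE` is already the default policy):

```cpp
#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

// Hypothetical helper: builds attributes for a saturating cast to F16.
CastAttributes make_f16_cast_attributes()
{
    // Each setter returns *this, so the calls chain.
    return CastAttributes().data_type(DataType::F16).convert_policy(ConvertPolicy::SATURATE);
}
```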
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h
new file mode 100644
index 0000000000..4b427be06a
--- /dev/null
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUCAST
+#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUCAST
+
+#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Forward declaration */
+class GpuWorkloadContext;
+class GpuWorkloadSketch;
+
+/** Operator interface. */
+class GpuCast final
+{
+public:
+ /** Attributes are a set of backend-agnostic parameters that define what an operator does */
+ using Attributes = CastAttributes;
+ /** Create an operator and fuse it into the workload sketch.
+ * @note If @ref validate_op() fails, the creation also fails and may throw an error.
+ * @note If @ref validate_op() fails, @p sketch remains unchanged and valid.
+ *
+ * Valid data type configurations:
+ * |src |dst |
+ * |:--------------|:--------------------------------------|
+ * |U8 | S8, U16, S16, U32, S32, F16, F32 |
+ * |U16 | U8, S8, S16, U32, S32, F16, F32 |
+ * |S16 | U8, S8, U16, U32, S32, F16, F32 |
+ * |U32 | U8, S8, U16, S16, S32, F16, F32 |
+ * |S32 | U8, S8, U16, S16, U32, F16, F32 |
+ * |F16 | U8, S8, U16, S16, U32, S32, F32 |
+ * |F32 | U8, S8, U16, S16, U32, S32, F16 |
+ *
+ * The input data type must be different from the output data type.
+ *
+ * Valid data layouts:
+ * - Any
+ *
+ * @param[in,out] sketch Workload sketch into which the operator will be fused
+ * @param[in] src Source tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[out] dst Destination tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ * @param[in] attributes Operator attributes
+ */
+ static void create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *dst,
+ const Attributes &attributes);
+ /** Check if the operator configuration is supported, irrespective of fusion
+ *
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] src Source tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] dst Destination tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ * @param[in] attributes Operator attributes
+ */
+ static Status is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const Attributes &attributes);
+ /** Validate the operator and check whether its configuration is supported and whether it can be fused into the workload sketch.
+ * Similar to @ref GpuCast::create_op()
+ */
+ static Status validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const Attributes &attributes);
+};
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUCAST */
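The header above defines a two-phase contract: validate_op() mirrors create_op() but leaves the sketch untouched, so callers can probe support before fusing. A minimal caller-side sketch under stated assumptions: the context/sketch construction and create_tensor_info() helper are taken from the surrounding dynamic fusion API, not from this patch, and may differ in detail.

```cpp
// Hedged usage sketch, not a definitive recipe. Header paths and helper names
// (CLKernelLibrary, create_tensor_info) are assumptions from the surrounding tree.
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

void fuse_cast_example()
{
    auto               cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    GpuWorkloadContext context{ &cl_compile_ctx };
    GpuWorkloadSketch  sketch{ &context };

    // Initialized source; empty destination, to be auto-initialized to F16.
    TensorInfo src_info = sketch.create_tensor_info(TensorShape(32U, 16U), 1, DataType::F32);
    TensorInfo dst_info = sketch.create_tensor_info();

    GpuCast::Attributes attributes;
    attributes.data_type(DataType::F16).convert_policy(ConvertPolicy::SATURATE);

    // validate_op() leaves the sketch unchanged on failure; create_op() fuses.
    if(bool(GpuCast::validate_op(sketch, &src_info, &dst_info, attributes)))
    {
        GpuCast::create_op(sketch, &src_info, &dst_info, attributes);
    }
}
```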
diff --git a/filelist.json b/filelist.json
index 2db128791f..9b1db9b424 100644
--- a/filelist.json
+++ b/filelist.json
@@ -2199,6 +2199,7 @@
"dynamic_fusion": [
"src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp",
"src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp",
+ "src/dynamic_fusion/sketch/attributes/CastAttributes.cpp",
"src/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.cpp",
"src/dynamic_fusion/sketch/OperatorAttributes.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelArgument.cpp",
@@ -2209,14 +2210,17 @@
"src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.cpp",
"src/dynamic_fusion/sketch/gpu/GpuWorkloadContext.cpp",
"src/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.cpp",
+ "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.cpp",
- "src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp",
+ "src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp",
+ "src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp",
+ "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp",
diff --git a/src/dynamic_fusion/sketch/attributes/CastAttributes.cpp b/src/dynamic_fusion/sketch/attributes/CastAttributes.cpp
new file mode 100644
index 0000000000..4ad94268f4
--- /dev/null
+++ b/src/dynamic_fusion/sketch/attributes/CastAttributes.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+CastAttributes &CastAttributes::data_type(const DataType &data_type)
+{
+ _data_type = data_type;
+ return *this;
+}
+
+DataType CastAttributes::data_type() const
+{
+ return _data_type;
+}
+
+CastAttributes &CastAttributes::convert_policy(const ConvertPolicy &convert_policy)
+{
+ _convert_policy = convert_policy;
+ return *this;
+}
+
+ConvertPolicy CastAttributes::convert_policy() const
+{
+ return _convert_policy;
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp
new file mode 100644
index 0000000000..007ba6380c
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "ClComponentCast.h"
+
+#include "arm_compute/core/Error.h"
+#include "src/core/CL/CLValidate.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+Status ClComponentCast::validate(
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings)
+{
+ ARM_COMPUTE_UNUSED(properties, attributes, settings);
+
+ const ITensorInfo *src = tensors.get_const_tensor(TensorType::ACL_SRC_0);
+ const ITensorInfo *dst = tensors.get_const_tensor(TensorType::ACL_DST_0);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == attributes.data_type(), "input and target data types should be different");
+
+ // Validate in case of configured dst
+ if(dst->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() != attributes.data_type(), "dst and target data types should be the same");
+ }
+
+ return Status{};
+}
+ClComponentCast::ClComponentCast(ComponentId id,
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings)
+ : IGpuKernelComponent{ id, properties, tensors },
+ _component_writer{ std::make_unique<ClTemplateCast>(id, tensors, attributes) }
+{
+ ARM_COMPUTE_UNUSED(attributes, settings);
+}
+ClComponentCast::~ClComponentCast()
+{
+}
+const IGpuTemplateComponentWriter *ClComponentCast::template_writer() const
+{
+ return _component_writer.get();
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h
new file mode 100644
index 0000000000..d0f75b1062
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTCAST
+#define SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTCAST
+
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
+#include "src/dynamic_fusion/sketch/gpu/components/IGpuKernelComponent.h"
+
+namespace arm_compute
+{
+/** Forward declaration */
+class ITensorInfo;
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Forward declaration */
+template <typename T>
+class ArgumentPack;
+
+/** Component specific settings
+ */
+class ClComponentCastSettings
+{
+public:
+private:
+};
+
+/** Forward declaration */
+class ClTemplateCast;
+
+class ClComponentCast final : public IGpuKernelComponent
+{
+public:
+ /** Attributes are a set of backend-agnostic parameters that define what a component does */
+ using Attributes = CastAttributes;
+ /** Settings are a set of backend-specific parameters that influence the implementation of a component */
+ using Settings = ClComponentCastSettings;
+
+ /** Validate the component
+ *
+ * @param[in] properties Component properties @ref Properties
+ * @param[in,out] tensors Tensor arguments to the component
+ * @param[in] attributes Component attributes @ref Attributes
+ * @param[in] settings Component settings @ref Settings
+ *
+ * @return Status Validation results
+ *
+ * Tensor argument names:
+ * - ACL_SRC_0: Input
+ * - ACL_DST_0: Output
+ *
+ * Tensor argument constness:
+ * - ACL_SRC_0: Const
+ * - ACL_DST_0: Const
+ *
+ * Valid data layouts:
+ * - All
+ *
+ * Valid data type configurations:
+ * |ACL_SRC_0 |ACL_DST_0 |
+ * |:--------------|:--------------------------------------|
+ * |U8 | S8, U16, S16, U32, S32, F16, F32 |
+ * |U16 | U8, S8, S16, U32, S32, F16, F32 |
+ * |S16 | U8, S8, U16, U32, S32, F16, F32 |
+ * |U32 | U8, S8, U16, S16, S32, F16, F32 |
+ * |S32 | U8, S8, U16, S16, U32, F16, F32 |
+ * |F16 | U8, S8, U16, S16, U32, S32, F32 |
+ * |F32 | U8, S8, U16, S16, U32, S32, F16 |
+ */
+ static Status validate(
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings);
+
+ /** Constructor
+ *
+ * Similar to @ref ClComponentCast::validate()
+ */
+ ClComponentCast(ComponentId id,
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings);
+
+ /** Destructor */
+ ~ClComponentCast() override;
+ /** Prevent instances of this class from being copy constructed */
+ ClComponentCast(const ClComponentCast &component) = delete;
+ /** Prevent instances of this class from being copied */
+ ClComponentCast &operator=(const ClComponentCast &component) = delete;
+ /** Allow instances of this class to be move constructed */
+ ClComponentCast(ClComponentCast &&component) = default;
+ /** Allow instances of this class to be moved */
+ ClComponentCast &operator=(ClComponentCast &&component) = default;
+ /** Get template writer for the component */
+ const IGpuTemplateComponentWriter *template_writer() const override;
+ /** Get component type */
+ GpuComponentType type() const override
+ {
+ return GpuComponentType::Complex;
+ }
+
+private:
+ std::unique_ptr<ClTemplateCast> _component_writer;
+};
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTCAST */
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp
new file mode 100644
index 0000000000..9e5e735c60
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h"
+
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h"
+
+#include "src/common/utils/Log.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+}
+Status GpuCast::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const CastAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate = *dst;
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_data_type(attributes.data_type()));
+
+ // Check support level
+ // Data Type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src,
+ 1,
+ DataType::U8, DataType::S8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
+ DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+ DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst_info_to_validate,
+ 1,
+ DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
+ DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+ DataType::F32);
+
+ if(context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+ // Validate Cast Component
+ {
+ const auto properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+ auto settings = ClComponentCast::Settings();
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentCast::validate(properties, arguments, attributes, settings));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{};
+}
+
+Status GpuCast::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const CastAttributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !dst->has_valid_id());
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate = *dst;
+ auto_init_if_empty(dst_info_to_validate, src->clone()->set_data_type(attributes.data_type()));
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+void GpuCast::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *dst,
+ const CastAttributes &attributes)
+{
+ // Assert validation
+ ARM_COMPUTE_ERROR_THROW_ON(GpuCast::validate_op(sketch, src, dst, attributes));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_LOG_PARAMS(src, dst, attributes);
+
+ // Auto initialize dst tensor info if empty
+ auto_init_if_empty(*dst, src->clone()->set_data_type(attributes.data_type()));
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+ const auto *sketch_ctx = sketch.implementation().context();
+
+ if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON(sketch_ctx->cl_compile_context() == nullptr);
+
+ // Add Cast Component
+ {
+ const auto properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+ auto settings = ClComponentCast::Settings();
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentCast>(properties, arguments, attributes, settings);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
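is_supported_op(), validate_op() and create_op() above all share one auto-initialization rule: an empty dst inherits its shape from src and takes its data type from the attributes. A stand-alone illustration of that contract (hypothetical shapes; auto_init_if_empty() and its header path come from the existing tree):

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "src/core/helpers/AutoConfiguration.h"

using namespace arm_compute;

void auto_init_example()
{
    TensorInfo src(TensorShape(8U, 4U), 1, DataType::F32);
    TensorInfo dst{}; // total_size() == 0, i.e. "empty"

    // Clone src, override only the data type, then initialize dst iff it is empty.
    auto_init_if_empty(dst, src.clone()->set_data_type(DataType::F16));
    // dst now has shape (8, 4) and DataType::F16; a pre-initialized dst is left untouched.
}
```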
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp
new file mode 100644
index 0000000000..1ac49406a8
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "ClTemplateCast.h"
+
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGroup.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ClTemplateCast::ClTemplateCast(ComponentId id, const ArgumentPack<ITensorInfo> &tensors, const Attributes &attributes)
+ : IGpuTemplateComponentWriter{ id, tensors }, _src{}, _dst{}, _attributes{ attributes }
+{
+ _src = this->tensors().get_const_tensor(TensorType::ACL_SRC_0);
+ _dst = this->tensors().get_const_tensor(TensorType::ACL_DST_0);
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+}
+
+std::string ClTemplateCast::get_name() const
+{
+ const size_t src_size = data_size_from_type(_src->data_type());
+ const size_t dst_size = data_size_from_type(_dst->data_type());
+
+ return (src_size >= dst_size) ? "cast_down" : "cast_up";
+}
+
+std::string ClTemplateCast::get_component_code(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ const std::string kernel_name = get_name();
+
+ std::string code = R"_(
+//------------------ START KERNEL {{meta_kernel_id}} ---------------------
+// IN_0(src) {{src}}
+// OUT(dst, accum) {{dst}}
+
+TILE({{DATA_TYPE_OUT}}, M0, N0, {{dst}});
+TILE(uint, M0, 1, g_dst_indirect_y);
+{
+ {{src}}_offset_first_element_in_bytes += get_global_id(2) * {{src}}_stride_z;
+
+ TILE({{DATA_TYPE_IN}}, M0, N0, in_data);
+ T_LOAD({{DATA_TYPE_IN}}, M0, N0, BUFFER, {{src}}, g_ind_0, g_ind_1, 1, {{src}}_stride_y, in_data);
+)_";
+
+ code += R"_(
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+)_";
+
+ if(kernel_name == "cast_down" && is_data_type_quantized(_src->data_type()))
+ {
+ code += R"_(
+ in_data[m0].v ^= (VEC_DATA_TYPE({{DATA_TYPE_IN}}, N0))0x80;
+)_";
+ }
+
+ if(kernel_name == "cast_down" && (is_data_type_float(_src->data_type()) || _attributes.convert_policy() == ConvertPolicy::SATURATE))
+ {
+ code += R"_(
+ {{dst}}[m0].v = CONVERT_SAT(in_data[m0].v, VEC_DATA_TYPE({{DATA_TYPE_OUT}}, N0));
+)_";
+ }
+ else
+ {
+ code += R"_(
+ {{dst}}[m0].v = CONVERT(in_data[m0].v, VEC_DATA_TYPE({{DATA_TYPE_OUT}}, N0));
+)_";
+ }
+
+ code += R"_(
+ })
+)_";
+
+ code += R"_(
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ g_dst_indirect_y[i].v = (uint)min((int)(g_ind_1 + i), (int)({{arg_dst}}_w) - 1);
+ g_dst_indirect_y[i].v += (int)(g_ind_2 % {{arg_dst}}_h) * (int)({{arg_dst}}_w);
+ g_dst_indirect_y[i].v += (int)(g_ind_2 / {{arg_dst}}_h) * (int)({{arg_dst}}_w * {{arg_dst}}_h);
+ })
+}
+//------------------ END KERNEL {{meta_kernel_id}} ---------------------
+)_";
+
+ return code;
+}
+
+void ClTemplateCast::declare_variables(GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const
+{
+ vtable.declare_variable(
+ _src,
+ GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer),
+ comp_group.is_intermediate_tensor(_src),
+ "src");
+
+ vtable.declare_variable(
+ _dst,
+ GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer),
+ comp_group.is_intermediate_tensor(_dst),
+ "dst");
+}
+
+TagLUT ClTemplateCast::get_tag_lut(const GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ TagLUT lut{};
+
+ // Arguments and global shared variables
+ lut["src"] = vtable.get_variable(_src);
+ lut["dst"] = vtable.get_variable(_dst);
+
+ const auto dst_argument = vtable.get_variable(comp_group.get_dst_tensors()[0]);
+ lut["arg_dst"] = dst_argument.uniq_name;
+
+ // Local build options
+ lut["meta_kernel_id"] = id();
+
+ lut["DATA_TYPE_IN"] = get_cl_type_from_data_type(_src->data_type());
+ lut["DATA_TYPE_OUT"] = get_cl_type_from_data_type(_dst->data_type());
+
+ return lut;
+}
+
+CLBuildOptions ClTemplateCast::get_build_options(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ const auto root_window = comp_group.get_root_component()->template_writer()->get_window();
+ const unsigned int n0 = root_window.x().step();
+ const unsigned int m0 = root_window.y().step();
+
+ // Set build options
+ CLBuildOptions build_opts{};
+ build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
+ build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(_src->dimension(0) % n0));
+ build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
+
+ return build_opts;
+}
+
+std::string ClTemplateCast::get_config_id() const
+{
+ std::string config_id{};
+
+ config_id += "_";
+ config_id += lower_string(string_from_data_type(_src->data_type()));
+ config_id += "_";
+ config_id += lower_string(string_from_data_type(_dst->data_type()));
+ config_id += "_";
+ config_id += support::cpp11::to_string(_src->dimension(0));
+ config_id += "_";
+ config_id += support::cpp11::to_string(_src->dimension(1));
+
+ return config_id;
+}
+
+std::set<std::string> ClTemplateCast::get_headers_list() const
+{
+ return std::set<std::string>{ "helpers.h", "tile_helpers.h" };
+}
+
+Window ClTemplateCast::get_window() const
+{
+ ARM_COMPUTE_ERROR_ON_MSG(_dst->tensor_shape().total_size() == 0U, "Destination tensor is not initialized");
+
+ const unsigned int n0 = adjust_vec_size(16 / _src->element_size(), _src->dimension(0));
+ Window win = calculate_max_window(*_dst, Steps(n0));
+ return win.collapse(win, Window::DimZ);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
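For reference, here is what the template above would produce for an F32 -> F16 cast once the tag LUT has run: src_size (4) >= dst_size (2) selects "cast_down", the source is a float type so CONVERT_SAT is emitted, and the quantized XOR block is skipped. The kernel id and the src/dst variable names are illustrative; the real names come from the variable table.

```c
//------------------ START KERNEL 0 ---------------------
// IN_0(src) src
// OUT(dst, accum) dst

TILE(half, M0, N0, dst);
TILE(uint, M0, 1, g_dst_indirect_y);
{
    src_offset_first_element_in_bytes += get_global_id(2) * src_stride_z;

    TILE(float, M0, N0, in_data);
    T_LOAD(float, M0, N0, BUFFER, src, g_ind_0, g_ind_1, 1, src_stride_y, in_data);

    LOOP_UNROLLING(int, m0, 0, 1, M0,
    {
        dst[m0].v = CONVERT_SAT(in_data[m0].v, VEC_DATA_TYPE(half, N0));
    })

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        g_dst_indirect_y[i].v = (uint)min((int)(g_ind_1 + i), (int)(dst_w) - 1);
        g_dst_indirect_y[i].v += (int)(g_ind_2 % dst_h) * (int)(dst_w);
        g_dst_indirect_y[i].v += (int)(g_ind_2 / dst_h) * (int)(dst_w * dst_h);
    })
}
//------------------ END KERNEL 0 ---------------------
```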
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h
new file mode 100644
index 0000000000..08255ca5af
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateCast.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATECAST
+#define SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATECAST
+
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentCast.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/IGpuTemplateComponentWriter.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class ClTemplateCast final : public IGpuTemplateComponentWriter
+{
+public:
+ using Attributes = ClComponentCast::Attributes;
+
+ /** Constructor
+ *
+ * @param[in] id Component id
+ * @param[in] tensors Tensor arguments to the components
+ * @param[in] attributes Component attributes
+ */
+ ClTemplateCast(ComponentId id, const ArgumentPack<ITensorInfo> &tensors, const Attributes &attributes);
+ /** Prevent instances of this class from being copy constructed */
+ ClTemplateCast(const ClTemplateCast &cast) = delete;
+ /** Prevent instances of this class from being copied */
+ ClTemplateCast &operator=(const ClTemplateCast &cast) = delete;
+ /** Allow instances of this class to be move constructed */
+ ClTemplateCast(ClTemplateCast &&cast) = default;
+ /** Allow instances of this class to be moved */
+ ClTemplateCast &operator=(ClTemplateCast &&cast) = default;
+ /** Generate kernel component name */
+ std::string get_name() const override;
+ /** Generate kernel component code template
+ *
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return std::string Component code
+ */
+ std::string get_component_code(const ComponentGroup &comp_group) const override;
+ /** Declare all variables used by the component in the @p vtable
+ *
+ * @param[out] vtable Variable table
+ * @param[in] comp_group Component group of which the component is a part of
+ */
+ void declare_variables(GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override;
+ /** Generate the tag look-up table used to instantiate the component code.
+ *
+ * @param[in] vtable Variable table
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return TagLUT Tag lookup table
+ */
+ TagLUT get_tag_lut(const GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override;
+ /** Generate the build options used in the component
+ *
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return CLBuildOptions Build options
+ */
+ CLBuildOptions get_build_options(const ComponentGroup &comp_group) const override;
+ /** Generate the component config id string used for tuning */
+ std::string get_config_id() const override;
+ /** Generate the header list used in the component */
+ std::set<std::string> get_headers_list() const override;
+ /** Generate the execution window for the component */
+ Window get_window() const override;
+
+private:
+ const ITensorInfo *_src;
+ const ITensorInfo *_dst;
+ const Attributes _attributes;
+};
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATECAST */
diff --git a/tests/validation/dynamic_fusion/gpu/cl/Cast.cpp b/tests/validation/dynamic_fusion/gpu/cl/Cast.cpp
new file mode 100644
index 0000000000..cb6c8c52f6
--- /dev/null
+++ b/tests/validation/dynamic_fusion/gpu/cl/Cast.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ConvertPolicyDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// Tolerance
+constexpr AbsoluteTolerance<float> one_tolerance(1);
+constexpr AbsoluteTolerance<float> zero_tolerance(0);
+
+/** Input data sets */
+// QASYMM8
+const auto CastQASYMM8toF32Dataset = combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::F32));
+
+// U8
+const auto CastU8toS8Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S8));
+const auto CastU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
+const auto CastU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
+const auto CastU8toU32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U32));
+const auto CastU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
+const auto CastU8toF16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::F16));
+const auto CastU8toF32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::F32));
+
+// S8
+const auto CastS8toU8Dataset = combine(framework::dataset::make("DataType", DataType::S8), framework::dataset::make("DataType", DataType::U8));
+const auto CastS8toU16Dataset = combine(framework::dataset::make("DataType", DataType::S8), framework::dataset::make("DataType", DataType::U16));
+const auto CastS8toS16Dataset = combine(framework::dataset::make("DataType", DataType::S8), framework::dataset::make("DataType", DataType::S16));
+const auto CastS8toU32Dataset = combine(framework::dataset::make("DataType", DataType::S8), framework::dataset::make("DataType", DataType::U32));
+const auto CastS8toS32Dataset = combine(framework::dataset::make("DataType", DataType::S8), framework::dataset::make("DataType", DataType::S32));
+const auto CastS8toF16Dataset = combine(framework::dataset::make("DataType", DataType::S8), framework::dataset::make("DataType", DataType::F16));
+const auto CastS8toF32Dataset = combine(framework::dataset::make("DataType", DataType::S8), framework::dataset::make("DataType", DataType::F32));
+
+// U16
+const auto CastU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
+const auto CastU16toS8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::S8));
+const auto CastU16toS16Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::S16));
+const auto CastU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
+const auto CastU16toS32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::S32));
+const auto CastU16toF16Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::F16));
+const auto CastU16toF32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::F32));
+
+// S16
+const auto CastS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
+const auto CastS16toS8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S8));
+const auto CastS16toU16Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U16));
+const auto CastS16toU32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U32));
+const auto CastS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
+const auto CastS16toF16Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::F16));
+const auto CastS16toF32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::F32));
+
+// U32
+const auto CastU32toU8Dataset = combine(framework::dataset::make("DataType", DataType::U32), framework::dataset::make("DataType", DataType::U8));
+const auto CastU32toS8Dataset = combine(framework::dataset::make("DataType", DataType::U32), framework::dataset::make("DataType", DataType::S8));
+const auto CastU32toU16Dataset = combine(framework::dataset::make("DataType", DataType::U32), framework::dataset::make("DataType", DataType::U16));
+const auto CastU32toS16Dataset = combine(framework::dataset::make("DataType", DataType::U32), framework::dataset::make("DataType", DataType::S16));
+const auto CastU32toS32Dataset = combine(framework::dataset::make("DataType", DataType::U32), framework::dataset::make("DataType", DataType::S32));
+const auto CastU32toF16Dataset = combine(framework::dataset::make("DataType", DataType::U32), framework::dataset::make("DataType", DataType::F16));
+const auto CastU32toF32Dataset = combine(framework::dataset::make("DataType", DataType::U32), framework::dataset::make("DataType", DataType::F32));
+
+// S32
+const auto CastS32toU8Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::U8));
+const auto CastS32toS8Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::S8));
+const auto CastS32toU16Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::U16));
+const auto CastS32toS16Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::S16));
+const auto CastS32toU32Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::U32));
+const auto CastS32toF16Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::F16));
+const auto CastS32toF32Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::F32));
+
+// F16
+const auto CastF16toU8Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::U8));
+const auto CastF16toS8Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::S8));
+const auto CastF16toU16Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::U16));
+const auto CastF16toS16Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::S16));
+const auto CastF16toU32Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::U32));
+const auto CastF16toS32Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::S32));
+const auto CastF16toF32Dataset = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F32));
+
+// F32
+const auto CastF32toU8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::U8));
+const auto CastF32toS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::S8));
+const auto CastF32toU16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::U16));
+const auto CastF32toS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::S16));
+const auto CastF32toU32Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::U32));
+const auto CastF32toS32Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::S32));
+const auto CastF32toF16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F16));
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(DYNAMIC_FUSION)
+TEST_SUITE(CAST)
+
+template <typename T>
+using DynamicFusionCLCastToU8Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, uint8_t>;
+template <typename T>
+using DynamicFusionCLCastToS8Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, int8_t>;
+template <typename T>
+using DynamicFusionCLCastToU16Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, uint16_t>;
+template <typename T>
+using DynamicFusionCLCastToS16Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, int16_t>;
+template <typename T>
+using DynamicFusionCLCastToU32Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, uint32_t>;
+template <typename T>
+using DynamicFusionCLCastToS32Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, int32_t>;
+template <typename T>
+using DynamicFusionCLCastToF16Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, half>;
+template <typename T>
+using DynamicFusionCLCastToF32Fixture = DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, float>;
+
+#define CAST_SUITE(NAME, idt, odt, type, dataset, tolerance) \
+ TEST_SUITE(NAME) \
+ FIXTURE_DATA_TEST_CASE(RunSmall, type, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), dataset), \
+ datasets::ConvertPolicies())) \
+ { \
+ validate(CLAccessor(_target), _reference, tolerance); \
+ } \
+ TEST_SUITE_END()
+
+// QASYMM8
+CAST_SUITE(QASYMM8_to_F32, DataType::QASYMM8, DataType::F32, DynamicFusionCLCastToF32Fixture<uint8_t>, CastQASYMM8toF32Dataset, zero_tolerance)
+
+// U8
+CAST_SUITE(U8_to_S8, DataType::U8, DataType::S8, DynamicFusionCLCastToS8Fixture<uint8_t>, CastU8toS8Dataset, zero_tolerance)
+CAST_SUITE(U8_to_U16, DataType::U8, DataType::U16, DynamicFusionCLCastToU16Fixture<uint8_t>, CastU8toU16Dataset, zero_tolerance)
+CAST_SUITE(U8_to_S16, DataType::U8, DataType::S16, DynamicFusionCLCastToS16Fixture<uint8_t>, CastU8toS16Dataset, zero_tolerance)
+CAST_SUITE(U8_to_U32, DataType::U8, DataType::U32, DynamicFusionCLCastToU32Fixture<uint8_t>, CastU8toU32Dataset, zero_tolerance)
+CAST_SUITE(U8_to_S32, DataType::U8, DataType::S32, DynamicFusionCLCastToS32Fixture<uint8_t>, CastU8toS32Dataset, zero_tolerance)
+CAST_SUITE(U8_to_F16, DataType::U8, DataType::F16, DynamicFusionCLCastToF16Fixture<uint8_t>, CastU8toF16Dataset, zero_tolerance)
+CAST_SUITE(U8_to_F32, DataType::U8, DataType::F32, DynamicFusionCLCastToF32Fixture<uint8_t>, CastU8toF32Dataset, zero_tolerance)
+
+// S8
+CAST_SUITE(S8_to_U8, DataType::S8, DataType::U8, DynamicFusionCLCastToU8Fixture<int8_t>, CastS8toU8Dataset, zero_tolerance)
+CAST_SUITE(S8_to_U16, DataType::S8, DataType::U16, DynamicFusionCLCastToU16Fixture<int8_t>, CastS8toU16Dataset, zero_tolerance)
+CAST_SUITE(S8_to_S16, DataType::S8, DataType::S16, DynamicFusionCLCastToS16Fixture<int8_t>, CastS8toS16Dataset, zero_tolerance)
+CAST_SUITE(S8_to_U32, DataType::S8, DataType::U32, DynamicFusionCLCastToU32Fixture<int8_t>, CastS8toU32Dataset, zero_tolerance)
+CAST_SUITE(S8_to_S32, DataType::S8, DataType::S32, DynamicFusionCLCastToS32Fixture<int8_t>, CastS8toS32Dataset, zero_tolerance)
+CAST_SUITE(S8_to_F16, DataType::S8, DataType::F16, DynamicFusionCLCastToF16Fixture<int8_t>, CastS8toF16Dataset, zero_tolerance)
+CAST_SUITE(S8_to_F32, DataType::S8, DataType::F32, DynamicFusionCLCastToF32Fixture<int8_t>, CastS8toF32Dataset, zero_tolerance)
+
+// U16
+CAST_SUITE(U16_to_U8, DataType::U16, DataType::U8, DynamicFusionCLCastToU8Fixture<uint16_t>, CastU16toU8Dataset, zero_tolerance)
+CAST_SUITE(U16_to_S8, DataType::U16, DataType::S8, DynamicFusionCLCastToS8Fixture<uint16_t>, CastU16toS8Dataset, zero_tolerance)
+CAST_SUITE(U16_to_S16, DataType::U16, DataType::S16, DynamicFusionCLCastToS16Fixture<uint16_t>, CastU16toS16Dataset, zero_tolerance)
+CAST_SUITE(U16_to_U32, DataType::U16, DataType::U32, DynamicFusionCLCastToU32Fixture<uint16_t>, CastU16toU32Dataset, zero_tolerance)
+CAST_SUITE(U16_to_S32, DataType::U16, DataType::S32, DynamicFusionCLCastToS32Fixture<uint16_t>, CastU16toS32Dataset, zero_tolerance)
+CAST_SUITE(U16_to_F16, DataType::U16, DataType::F16, DynamicFusionCLCastToF16Fixture<uint16_t>, CastU16toF16Dataset, zero_tolerance)
+CAST_SUITE(U16_to_F32, DataType::U16, DataType::F32, DynamicFusionCLCastToF32Fixture<uint16_t>, CastU16toF32Dataset, zero_tolerance)
+
+// S16
+CAST_SUITE(S16_to_U8, DataType::S16, DataType::U8, DynamicFusionCLCastToU8Fixture<int16_t>, CastS16toU8Dataset, zero_tolerance)
+CAST_SUITE(S16_to_S8, DataType::S16, DataType::S8, DynamicFusionCLCastToS8Fixture<int16_t>, CastS16toS8Dataset, zero_tolerance)
+CAST_SUITE(S16_to_U16, DataType::S16, DataType::U16, DynamicFusionCLCastToU16Fixture<int16_t>, CastS16toU16Dataset, zero_tolerance)
+CAST_SUITE(S16_to_U32, DataType::S16, DataType::U32, DynamicFusionCLCastToU32Fixture<int16_t>, CastS16toU32Dataset, zero_tolerance)
+CAST_SUITE(S16_to_S32, DataType::S16, DataType::S32, DynamicFusionCLCastToS32Fixture<int16_t>, CastS16toS32Dataset, zero_tolerance)
+CAST_SUITE(S16_to_F16, DataType::S16, DataType::F16, DynamicFusionCLCastToF16Fixture<int16_t>, CastS16toF16Dataset, zero_tolerance)
+CAST_SUITE(S16_to_F32, DataType::S16, DataType::F32, DynamicFusionCLCastToF32Fixture<int16_t>, CastS16toF32Dataset, zero_tolerance)
+
+// U32
+CAST_SUITE(U32_to_U8, DataType::U32, DataType::U8, DynamicFusionCLCastToU8Fixture<uint32_t>, CastU32toU8Dataset, zero_tolerance)
+CAST_SUITE(U32_to_S8, DataType::U32, DataType::S8, DynamicFusionCLCastToS8Fixture<uint32_t>, CastU32toS8Dataset, zero_tolerance)
+CAST_SUITE(U32_to_U16, DataType::U32, DataType::U16, DynamicFusionCLCastToU16Fixture<uint32_t>, CastU32toU16Dataset, zero_tolerance)
+CAST_SUITE(U32_to_S16, DataType::U32, DataType::S16, DynamicFusionCLCastToS16Fixture<uint32_t>, CastU32toS16Dataset, zero_tolerance)
+CAST_SUITE(U32_to_S32, DataType::U32, DataType::S32, DynamicFusionCLCastToS32Fixture<uint32_t>, CastU32toS32Dataset, zero_tolerance)
+CAST_SUITE(U32_to_F16, DataType::U32, DataType::F16, DynamicFusionCLCastToF16Fixture<uint32_t>, CastU32toF16Dataset, zero_tolerance)
+CAST_SUITE(U32_to_F32, DataType::U32, DataType::F32, DynamicFusionCLCastToF32Fixture<uint32_t>, CastU32toF32Dataset, zero_tolerance)
+
+// S32
+CAST_SUITE(S32_to_U8, DataType::S32, DataType::U8, DynamicFusionCLCastToU8Fixture<int32_t>, CastS32toU8Dataset, zero_tolerance)
+CAST_SUITE(S32_to_S8, DataType::S32, DataType::S8, DynamicFusionCLCastToS8Fixture<int32_t>, CastS32toS8Dataset, zero_tolerance)
+CAST_SUITE(S32_to_U16, DataType::S32, DataType::U16, DynamicFusionCLCastToU16Fixture<int32_t>, CastS32toU16Dataset, zero_tolerance)
+CAST_SUITE(S32_to_S16, DataType::S32, DataType::S16, DynamicFusionCLCastToS16Fixture<int32_t>, CastS32toS16Dataset, zero_tolerance)
+CAST_SUITE(S32_to_U32, DataType::S32, DataType::U32, DynamicFusionCLCastToU32Fixture<int32_t>, CastS32toU32Dataset, zero_tolerance)
+CAST_SUITE(S32_to_F16, DataType::S32, DataType::F16, DynamicFusionCLCastToF16Fixture<int32_t>, CastS32toF16Dataset, zero_tolerance)
+CAST_SUITE(S32_to_F32, DataType::S32, DataType::F32, DynamicFusionCLCastToF32Fixture<int32_t>, CastS32toF32Dataset, zero_tolerance)
+
+// F16
+CAST_SUITE(F16_to_U8, DataType::F16, DataType::U8, DynamicFusionCLCastToU8Fixture<half>, CastF16toU8Dataset, one_tolerance)
+CAST_SUITE(F16_to_S8, DataType::F16, DataType::S8, DynamicFusionCLCastToS8Fixture<half>, CastF16toS8Dataset, one_tolerance)
+CAST_SUITE(F16_to_U16, DataType::F16, DataType::U16, DynamicFusionCLCastToU16Fixture<half>, CastF16toU16Dataset, one_tolerance)
+CAST_SUITE(F16_to_S16, DataType::F16, DataType::S16, DynamicFusionCLCastToS16Fixture<half>, CastF16toS16Dataset, one_tolerance)
+CAST_SUITE(F16_to_U32, DataType::F16, DataType::U32, DynamicFusionCLCastToU32Fixture<half>, CastF16toU32Dataset, one_tolerance)
+CAST_SUITE(F16_to_S32, DataType::F16, DataType::S32, DynamicFusionCLCastToS32Fixture<half>, CastF16toS32Dataset, one_tolerance)
+CAST_SUITE(F16_to_F32, DataType::F16, DataType::F32, DynamicFusionCLCastToF32Fixture<half>, CastF16toF32Dataset, zero_tolerance)
+
+// F32
+CAST_SUITE(F32_to_U8, DataType::F32, DataType::U8, DynamicFusionCLCastToU8Fixture<float>, CastF32toU8Dataset, one_tolerance)
+CAST_SUITE(F32_to_S8, DataType::F32, DataType::S8, DynamicFusionCLCastToS8Fixture<float>, CastF32toS8Dataset, one_tolerance)
+CAST_SUITE(F32_to_U16, DataType::F32, DataType::U16, DynamicFusionCLCastToU16Fixture<float>, CastF32toU16Dataset, one_tolerance)
+CAST_SUITE(F32_to_S16, DataType::F32, DataType::S16, DynamicFusionCLCastToS16Fixture<float>, CastF32toS16Dataset, one_tolerance)
+CAST_SUITE(F32_to_U32, DataType::F32, DataType::U32, DynamicFusionCLCastToU32Fixture<float>, CastF32toU32Dataset, one_tolerance)
+CAST_SUITE(F32_to_S32, DataType::F32, DataType::S32, DynamicFusionCLCastToS32Fixture<float>, CastF32toS32Dataset, one_tolerance)
+CAST_SUITE(F32_to_F16, DataType::F32, DataType::F16, DynamicFusionCLCastToF16Fixture<float>, CastF32toF16Dataset, zero_tolerance)
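+
+// For reference, a minimal sketch of what one CAST_SUITE invocation above is
+// expected to instantiate, assuming the macro defined earlier in this file
+// follows the usual arm_compute validation pattern (details are illustrative):
+//
+//   TEST_SUITE(F32_to_F16)
+//   FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionCLCastToF16Fixture<float>, framework::DatasetMode::ALL,
+//                          combine(combine(datasets::SmallShapes(), CastF32toF16Dataset), datasets::ConvertPolicies()))
+//   {
+//       validate(CLAccessor(_target), _reference, zero_tolerance);
+//   }
+//   TEST_SUITE_END()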
+
+TEST_SUITE_END() // CAST
+TEST_SUITE_END() // DYNAMIC_FUSION
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
new file mode 100644
index 0000000000..8553472fb9
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE
+#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/DepthConvertLayer.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class DynamicFusionCastValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
+ {
+ _target = compute_target(shape, dt_in, dt_out, policy);
+ _reference = compute_reference(shape, dt_in, dt_out, policy);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, DataType dt_in, DataType dt_out)
+ {
+        // Restrict the input value range so that casts to F16 cannot produce infinite values
+ if(dt_out == DataType::F16)
+ {
+ constexpr int signed_min = -32000;
+ constexpr int signed_max = 32000;
+ constexpr int unsigned_min = 0;
+ constexpr int unsigned_max = 65000;
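+            // These bounds keep every converted value finite in F16, whose largest finite value is 65504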
+
+ switch(dt_in)
+ {
+ case DataType::U8:
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ case DataType::S8:
+ case DataType::F32:
+ {
+ library->fill_tensor_uniform(tensor, i);
+ break;
+ }
+ case DataType::U16:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<uint16_t>(unsigned_min), static_cast<uint16_t>(unsigned_max));
+ break;
+ }
+ case DataType::S16:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<int16_t>(signed_min), static_cast<int16_t>(signed_max));
+ break;
+ }
+ case DataType::U32:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<uint32_t>(unsigned_min), static_cast<uint32_t>(unsigned_max));
+ break;
+ }
+ case DataType::S32:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min), static_cast<int32_t>(signed_max));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+    // The given input is in NCHW format
+ TensorType compute_target(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
+ {
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ // Create sketch tensors
+ TensorInfo src_info = sketch.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important
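+        // dst_info is deliberately created empty; the operator is expected to
+        // infer the destination shape and data type when it is added to the sketch.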
+ TensorInfo dst_info = sketch.create_tensor_info();
+
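+        // CastAttributes exposes fluent setters (each returns a reference to the
+        // object), so the destination data type and convert policy can be chained.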
+ CastAttributes attributes;
+ attributes.convert_policy(policy).data_type(dt_out);
+
+ FunctionType::create_op(sketch, &src_info, &dst_info, attributes);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+        // (Important) Allocate memory for any auxiliary tensors required by the workload
+ for(auto &data : runtime.get_auxiliary_tensors())
+ {
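+            // Each entry pairs an auxiliary tensor pointer (first) with its memory requirement (second)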
+ auto tensor = data.first;
+ const auto aux_mem_req = data.second;
+ tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
+ tensor->allocator()->allocate();
+ }
+
+ // Construct user tensors
+ TensorType t_src{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_src.allocator()->init(src_info);
+ t_dst.allocator()->init(dst_info);
+
+ // Allocate and fill user tensors
+ t_src.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_src), 0, dt_in, dt_out);
+
+ // Run runtime
+ runtime.run({ &t_src, &t_dst });
+ return t_dst;
+ }
+
+ SimpleTensor<T2> compute_reference(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
+ {
+ // Create reference
+ SimpleTensor<T1> src{ shape, dt_in, 1 };
+
+ // Fill reference
+ fill(src, 0, dt_in, dt_out);
+
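+        // Reuse the DepthConvertLayer reference; the trailing 0 is the fixed-point bit shift (no shift here)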
+ return reference::depth_convert<T1, T2>(src, dt_out, policy, 0);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T2> _reference{};
+};
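+
+// Usage sketch: the Cast tests are expected to bind this fixture through aliases
+// along these lines (illustrative, assuming GpuCast as the operator interface):
+//
+//   template <typename T>
+//   using DynamicFusionCLCastToF16Fixture =
+//       DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, T, half>;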
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE */
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 0122229ed2..515e568657 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -39,6 +39,7 @@
#include "arm_compute/core/experimental/IPostOp.h"
#include "arm_compute/core/experimental/PostOps.h"
#include "arm_compute/dynamic_fusion/sketch/OperatorAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h"
#include "arm_compute/runtime/CL/CLTunerTypes.h"
#include "arm_compute/runtime/CL/CLTypes.h"
@@ -3433,6 +3434,35 @@ inline std::string to_string(const experimental::dynamic_fusion::Conv2dAttribute
return str.str();
}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::CastAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] cast_attr arm_compute::experimental::dynamic_fusion::CastAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::CastAttributes &cast_attr)
+{
+ os << "CastAttributes="
+ << "["
+ << "Data Type=" << cast_attr.data_type() << ", "
+ << "Convert Policy=" << cast_attr.convert_policy() << "]";
+
+ return os;
+}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::CastAttributes type.
+ *
+ * @param[in] cast_attr arm_compute::experimental::dynamic_fusion::CastAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::CastAttributes &cast_attr)
+{
+ std::stringstream str;
+ str << cast_attr;
+ return str.str();
+}
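+// Example (sketch): for attributes configured as
+//   CastAttributes attr{};
+//   attr.data_type(DataType::S32).convert_policy(ConvertPolicy::SATURATE);
+// to_string(attr) returns "CastAttributes=[Data Type=S32, Convert Policy=SATURATE]"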
+
/** Formatted output of the arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes type.
*
* @param[out] os Output stream.