author    Jakub Sujak <jakub.sujak@arm.com>    2022-12-02 16:09:06 +0000
committer Gunes Bayir <gunes.bayir@arm.com>    2022-12-28 11:01:09 +0000
commit    8ae571454792327fc40641c72fe0b8de1e7d334f (patch)
tree      928028fa30d52a87413db16ed3abc4044bf07eec
parent    8468371b3e2ec42ee0b9b670d45d99eb1015574b (diff)
download  ComputeLibrary-8ae571454792327fc40641c72fe0b8de1e7d334f.tar.gz
Add Resize/Scale operator to Dynamic Fusion interface
Resolves: COMPMID-5521
Change-Id: Id38a4ce18f9ea8805a151acb064e72795535d1a0
Signed-off-by: Jakub Sujak <jakub.sujak@arm.com>
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8859
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--Android.bp4
-rw-r--r--arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h87
-rw-r--r--arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h99
-rw-r--r--filelist.json4
-rw-r--r--src/dynamic_fusion/sketch/attributes/ResizeAttributes.cpp90
-rw-r--r--src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp85
-rw-r--r--src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.h127
-rw-r--r--src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp179
-rw-r--r--src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp1
-rw-r--r--src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp310
-rw-r--r--src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.h120
-rw-r--r--tests/datasets/ScaleValidationDataset.h18
-rw-r--r--tests/validation/dynamic_fusion/gpu/cl/Resize.cpp529
-rw-r--r--tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h264
-rw-r--r--utils/TypePrinter.h33
15 files changed, 1947 insertions, 3 deletions
diff --git a/Android.bp b/Android.bp
index 963dd84f86..072bdb9fa5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -591,6 +591,7 @@ cc_library_static {
"src/dynamic_fusion/sketch/attributes/CastAttributes.cpp",
"src/dynamic_fusion/sketch/attributes/ClampAttributes.cpp",
"src/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.cpp",
+ "src/dynamic_fusion/sketch/attributes/ResizeAttributes.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelArgument.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelComponentGroup.cpp",
@@ -604,6 +605,7 @@ cc_library_static {
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.cpp",
+ "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp",
@@ -611,6 +613,7 @@ cc_library_static {
"src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp",
+ "src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp",
"src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateActivation.cpp",
@@ -618,6 +621,7 @@ cc_library_static {
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp",
+ "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateStore.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp",
"src/gpu/cl/ClContext.cpp",
diff --git a/arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h b/arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h
new file mode 100644
index 0000000000..8992693cd1
--- /dev/null
+++ b/arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_ATTRIBUTES_RESIZEATTRIBUTES
+#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_ATTRIBUTES_RESIZEATTRIBUTES
+
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Types.h"
+#include <cstdint>
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Attributes are backend-agnostic parameters (in addition to the input/output tensors) of an operator.
+ */
+
+/** Resize attributes */
+class ResizeAttributes
+{
+public:
+ /** Set output width */
+ ResizeAttributes &output_width(int32_t output_width);
+
+ /** Get output width */
+ int32_t output_width() const;
+
+ /** Set output height */
+ ResizeAttributes &output_height(int32_t output_height);
+
+ /** Get output height */
+ int32_t output_height() const;
+
+ /** Set interpolation policy */
+ ResizeAttributes &interpolation_policy(InterpolationPolicy interpolation_policy);
+
+ /** Get interpolation policy */
+ InterpolationPolicy interpolation_policy() const;
+
+ /** Set sampling policy */
+ ResizeAttributes &sampling_policy(SamplingPolicy sampling_policy);
+
+ /** Get sampling policy */
+ SamplingPolicy sampling_policy() const;
+
+ /** Set align corners */
+ ResizeAttributes &align_corners(bool align_corners);
+
+ /** Get align corners */
+ bool align_corners() const;
+
+private:
+ int32_t _output_width{};
+ int32_t _output_height{};
+ InterpolationPolicy _interpolation_policy{ InterpolationPolicy::BILINEAR };
+ SamplingPolicy _sampling_policy{ SamplingPolicy::CENTER };
+ bool _align_corners{ false };
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_ATTRIBUTES_RESIZEATTRIBUTES */
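Note: the setters above return the attributes object by reference, so calls can be chained. A small illustrative snippet (the values are arbitrary and not taken from this patch):

    ResizeAttributes attributes{};
    attributes.output_width(64)
              .output_height(64)
              .interpolation_policy(InterpolationPolicy::NEAREST_NEIGHBOR)
              .align_corners(false);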
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h
new file mode 100644
index 0000000000..1387bf1cf0
--- /dev/null
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPURESIZE
+#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPURESIZE
+
+#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Forward declaration */
+class GpuWorkloadContext;
+class GpuWorkloadSketch;
+
+/** Operator interface. */
+class GpuResize final
+{
+public:
+ /** Attributes are a set of backend-agnostic parameters that define what an operator does */
+ using Attributes = ResizeAttributes;
+
+ /** Create an operator and fuse it into the workload sketch.
+ * @note If @ref validate_op() fails, the creation also fails and may throw an error.
+ * @note If @ref validate_op() fails, @p sketch remains unchanged and valid.
+ *
+ * Valid data type configurations:
+ * |src |dst |
+ * |:--------------|:--------------|
+ * |QASYMM8 |QASYMM8 |
+ * |QASYMM8_SIGNED |QASYMM8_SIGNED |
+ * |F16 |F16 |
+ * |F32 |F32 |
+ * |U8 |U8 |
+ * |S16 |S16 |
+ *
+ * Valid data layouts:
+ * - NHWC
+ *
+ * @param[in,out] sketch Workload sketch into which the operator will be fused
+ * @param[in] src Source tensor info.
+ * @param[out] dst Destination tensor info.
+ * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ * @param[in] attributes Operator attributes
+ */
+ static void create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *dst,
+ const Attributes &attributes);
+ /** Check if the operator configuration is supported, irrespective of fusion
+ *
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] src Source tensor info.
+ * @param[in] dst Destination tensor info.
+ * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ * @param[in] attributes Operator attributes
+ */
+ static Status is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const Attributes &attributes);
+ /** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
+ * Similar to @ref GpuResize::create_op()
+ */
+ static Status validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const Attributes &attributes);
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPURESIZE */
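Note: a minimal, hedged usage sketch of this interface. The context/sketch setup mirrors the validation tests added later in this patch; the shapes and attribute values are illustrative placeholders only, not taken from the patch.

    using namespace arm_compute;
    using namespace arm_compute::experimental::dynamic_fusion;

    // Set up a workload sketch backed by the OpenCL compile context (as in the tests below).
    CLCompileContext   cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    GpuWorkloadContext gpu_ctx{ &cl_compile_ctx };
    GpuWorkloadSketch  sketch{ &gpu_ctx };

    // Register the operator's tensor infos with the sketch (NHWC placeholders: 2 channels, 3x3 -> 6x6, 2 batches).
    TensorInfo src_info = sketch.create_tensor_info(TensorInfo{ TensorShape(2U, 3U, 3U, 2U), 1, DataType::F32, DataLayout::NHWC });
    TensorInfo dst_info = sketch.create_tensor_info(TensorInfo{ TensorShape(2U, 6U, 6U, 2U), 1, DataType::F32, DataLayout::NHWC });

    // Describe the resize and fuse it into the sketch; create_op() asserts if validation fails.
    ResizeAttributes attributes{};
    attributes.output_width(6).output_height(6).interpolation_policy(InterpolationPolicy::BILINEAR);

    if(bool(GpuResize::validate_op(sketch, &src_info, &dst_info, attributes)))
    {
        GpuResize::create_op(sketch, &src_info, &dst_info, attributes);
    }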
diff --git a/filelist.json b/filelist.json
index 146731b95e..33f6fc3178 100644
--- a/filelist.json
+++ b/filelist.json
@@ -2202,6 +2202,7 @@
"src/dynamic_fusion/sketch/attributes/CastAttributes.cpp",
"src/dynamic_fusion/sketch/attributes/ClampAttributes.cpp",
"src/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.cpp",
+ "src/dynamic_fusion/sketch/attributes/ResizeAttributes.cpp",
"src/dynamic_fusion/sketch/OperatorAttributes.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelArgument.cpp",
"src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp",
@@ -2216,12 +2217,14 @@
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.cpp",
+ "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuAdd.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuClamp.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp",
+ "src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp",
"src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateActivation.cpp",
@@ -2229,6 +2232,7 @@
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp",
+ "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateStore.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp"
diff --git a/src/dynamic_fusion/sketch/attributes/ResizeAttributes.cpp b/src/dynamic_fusion/sketch/attributes/ResizeAttributes.cpp
new file mode 100644
index 0000000000..1919dbc72d
--- /dev/null
+++ b/src/dynamic_fusion/sketch/attributes/ResizeAttributes.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ResizeAttributes &ResizeAttributes::output_width(int32_t output_width)
+{
+ _output_width = output_width;
+ return *this;
+}
+
+int32_t ResizeAttributes::output_width() const
+{
+ return _output_width;
+}
+
+ResizeAttributes &ResizeAttributes::output_height(int32_t output_height)
+{
+ _output_height = output_height;
+ return *this;
+}
+
+int32_t ResizeAttributes::output_height() const
+{
+ return _output_height;
+}
+
+ResizeAttributes &ResizeAttributes::interpolation_policy(InterpolationPolicy interpolation_policy)
+{
+ _interpolation_policy = interpolation_policy;
+ return *this;
+}
+
+InterpolationPolicy ResizeAttributes::interpolation_policy() const
+{
+ return _interpolation_policy;
+}
+
+ResizeAttributes &ResizeAttributes::sampling_policy(SamplingPolicy sampling_policy)
+{
+ _sampling_policy = sampling_policy;
+ return *this;
+}
+
+SamplingPolicy ResizeAttributes::sampling_policy() const
+{
+ return _sampling_policy;
+}
+
+ResizeAttributes &ResizeAttributes::align_corners(bool align_corners)
+{
+ _align_corners = align_corners;
+ return *this;
+}
+
+bool ResizeAttributes::align_corners() const
+{
+ return _align_corners;
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp
new file mode 100644
index 0000000000..895b854ae2
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ClComponentResize.h"
+
+#include "arm_compute/core/Error.h"
+#include "src/core/CL/CLValidate.h"
+#include "src/core/utils/ScaleUtils.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+Status ClComponentResize::validate(const IGpuKernelComponent::Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const ClComponentResize::Attributes &attributes)
+{
+ ARM_COMPUTE_UNUSED(properties);
+
+ const ITensorInfo *src = tensors.get_const_tensor(TensorType::ACL_SRC_0);
+ const ITensorInfo *dst = tensors.get_const_tensor(TensorType::ACL_DST_0);
+
+ // Mismatching data types and quantization info
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+
+ // Device requirements met
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+
+ // Align corners and sampling policy conformance
+ ARM_COMPUTE_RETURN_ERROR_ON(attributes.align_corners() && !arm_compute::scale_utils::is_align_corners_allowed_sampling_policy(attributes.sampling_policy()));
+
+ // All tensor infos are initialized
+ ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().total_size() == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(dst->tensor_shape().total_size() == 0);
+
+ return Status();
+}
+
+ClComponentResize::ClComponentResize(ComponentId id,
+ const IGpuKernelComponent::Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const ClComponentResize::Attributes &attributes)
+ : IGpuKernelComponent{ id, properties, tensors },
+ _component_writer{ std::make_unique<ClTemplateResize>(id, tensors, attributes) }
+{
+}
+
+ClComponentResize::~ClComponentResize()
+{
+}
+
+const IGpuTemplateComponentWriter *ClComponentResize::template_writer() const
+{
+ return _component_writer.get();
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.h b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.h
new file mode 100644
index 0000000000..87d5a61ce3
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTRESIZE
+#define SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTRESIZE
+
+#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h"
+#include "src/dynamic_fusion/sketch/gpu/components/IGpuKernelComponent.h"
+
+namespace arm_compute
+{
+/** Forward declaration */
+class ITensorInfo;
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Forward declaration */
+template <typename T>
+class ArgumentPack;
+
+/** Forward declaration */
+class ClTemplateResize;
+
+class ClComponentResize final : public IGpuKernelComponent
+{
+public:
+ /** Attributes are a set of backend-agnostic parameters that define what a component does */
+ using Attributes = ResizeAttributes;
+
+ /** Validate the component
+ *
+ * @param[in] properties Component properties @ref Properties
+ * @param[in,out] tensors Tensor arguments to the component
+ * @param[in] attributes Component attributes @ref Attributes
+ *
+ * @return Status Validation results
+ *
+ * Tensor argument names:
+ * - ACL_SRC_0: Input
+ * - ACL_DST_0: Output
+ *
+ * Tensor argument constness:
+ * - ACL_SRC_0: Const
+ * - ACL_DST_0: Const
+ *
+ * Valid data layouts:
+ * - NHWC
+ *
+ * Valid data type configurations:
+ * |ACL_SRC_0 |ACL_DST_0 |
+ * |:--------------|:--------------|
+ * |QASYMM8 |QASYMM8 |
+ * |QASYMM8_SIGNED |QASYMM8_SIGNED |
+ * |F16 |F16 |
+ * |F32 |F32 |
+ * |U8 |U8 |
+ * |S16 |S16 |
+ */
+ static Status validate(
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes);
+
+ /** Constructor
+ *
+ * Similar to @ref ClComponentResize::validate()
+ */
+ ClComponentResize(ComponentId id,
+ const Properties &properties,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes);
+
+ /** Destructor */
+ ~ClComponentResize() override;
+
+ /** Prevent instances of this class from being copy constructed */
+ ClComponentResize(const ClComponentResize &component) = delete;
+
+ /** Prevent instances of this class from being copied */
+ ClComponentResize &operator=(const ClComponentResize &component) = delete;
+
+ /** Allow instances of this class to be move constructed */
+ ClComponentResize(ClComponentResize &&component) = default;
+
+ /** Allow instances of this class to be moved */
+ ClComponentResize &operator=(ClComponentResize &&component) = default;
+
+ /** Get template writer for the component */
+ const IGpuTemplateComponentWriter *template_writer() const override;
+
+ /** Get component type */
+ GpuComponentType type() const override
+ {
+ return GpuComponentType::Complex;
+ }
+
+private:
+ std::unique_ptr<ClTemplateResize> _component_writer;
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTRESIZE */
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp
new file mode 100644
index 0000000000..aa45f4c1a5
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.h"
+
+#include "src/common/utils/Log.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+void calculate_and_init_dst_if_empty(ITensorInfo *dst, const ITensorInfo *src, const ResizeAttributes &attributes)
+{
+ if(dst->total_size() == 0U)
+ {
+ TensorShape out_shape = src->tensor_shape();
+
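+ // In the NHWC layout supported by this operator, dimension 0 is channels, 1 is width and 2 is height.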
+ out_shape.set(1, attributes.output_width());
+ out_shape.set(2, attributes.output_height());
+
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(out_shape));
+ }
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+Status GpuResize::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const Attributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+
+ // Auto initialize dst tensor info
+ TensorInfo dst_info_to_validate = *dst;
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes);
+
+ // Check support level
+ // Data type
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
+ // Data layout
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
+ // Interpolation policy
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(attributes.interpolation_policy() != InterpolationPolicy::NEAREST_NEIGHBOR && attributes.interpolation_policy() != InterpolationPolicy::BILINEAR,
+ "Interpolation policy must be NEAREST_NEIGHBOR or BILINEAR");
+
+ // Check components
+ if(context.gpu_language() == GpuLanguage::OpenCL)
+ {
+ const auto cl_compile_ctx = context.cl_compile_context();
+ ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
+
+ // Validate Resize Component
+ {
+ const KernelProperties properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClComponentResize::validate(properties, arguments, attributes));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
+ }
+
+ return Status{};
+}
+
+Status GpuResize::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst,
+ const GpuResize::Attributes &attributes)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !dst->has_valid_id());
+
+ // Auto initialize dst tensor info if empty
+ TensorInfo dst_info_to_validate = *dst;
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes);
+
+ // Perform fusion test
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
+ "Operator fusion test failed. This operator cannot be fused into the workload");
+
+ // Check if configuration is supported
+ return is_supported_op(*sketch.gpu_context(), src, &dst_info_to_validate, attributes);
+}
+
+void GpuResize::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *dst,
+ const GpuResize::Attributes &attributes)
+{
+ // Assert validation
+ ARM_COMPUTE_ERROR_THROW_ON(GpuResize::validate_op(sketch, src, dst, attributes));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_LOG_PARAMS(src, dst, attributes);
+
+ // Auto initialize dst tensor info if empty
+ calculate_and_init_dst_if_empty(dst, src, attributes);
+
+ // Translate into components and add to component graph
+ GpuKernelComponentGraph &comp_graph = sketch.implementation().component_graph();
+ const auto *sketch_ctx = sketch.implementation().context();
+
+ if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(sketch_ctx->cl_compile_context());
+
+ // Add Resize Component
+ {
+ const auto properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentResize>(properties, arguments, attributes);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp
index 221addb7b5..26399c50a9 100644
--- a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp
@@ -330,6 +330,7 @@ CLBuildOptions ClTemplateDirectConv2d::get_build_options(const ComponentGroup &c
// to disable -cl-finite-math-only, we only include -cl-unsafe-math-optimizations
build_opts.add_option("-cl-unsafe-math-optimizations");
}
+
build_opts.add_option("-DIS_TILED");
build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp
new file mode 100644
index 0000000000..7ee79e82af
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ClTemplateResize.h"
+
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/core/utils/ScaleUtils.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGroup.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ClTemplateResize::ClTemplateResize(ComponentId id, const ArgumentPack<ITensorInfo> &tensors, const ClTemplateResize::Attributes &attributes)
+ : IGpuTemplateComponentWriter{ id, tensors }, _src{}, _dst{}, _attributes{ attributes }
+{
+ _src = this->tensors().get_const_tensor(TensorType::ACL_SRC_0);
+ _dst = this->tensors().get_const_tensor(TensorType::ACL_DST_0);
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+}
+
+std::string ClTemplateResize::get_name() const
+{
+ return _attributes.interpolation_policy() == InterpolationPolicy::BILINEAR ? "resize_bilinear" : "resize_nearest";
+}
+
+std::string ClTemplateResize::get_component_code(const IGpuTemplateComponentWriter::ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ std::string code = R"_(
+//------------------ START KERNEL {{meta_kernel_id}} ---------------------
+TILE({{DST_DATA_TYPE}}, 1, N0, {{dst}});
+TILE(uint, 1, 1, g_dst_indirect_y);
+{
+ const int yo = g_ind_2 % {{arg_dst}}_h;
+ const int bout = g_ind_2 / {{arg_dst}}_h;
+)_";
+
+ if(_attributes.interpolation_policy() == InterpolationPolicy::NEAREST_NEIGHBOR)
+ {
+ if(_attributes.sampling_policy() == SamplingPolicy::TOP_LEFT)
+ {
+ code += R"_(
+ float xi_f = (g_ind_1 * SCALE_X);
+ float yi_f = (yo * SCALE_Y);
+)_";
+ }
+ else
+ {
+ code += R"_(
+ float xi_f = ((g_ind_1 + 0.5f) * SCALE_X);
+ float yi_f = ((yo + 0.5f) * SCALE_Y);
+)_";
+ }
+
+ if(_attributes.align_corners())
+ {
+ code += R"_(
+ xi_f = round(xi_f);
+ yi_f = round(yi_f);
+)_";
+ }
+
+ code += R"_(
+ const int xi0 = clamp((int)xi_f, 0, (int){{src}}_w - 1);
+ const int yi0 = clamp((int)yi_f, 0, (int){{src}}_h - 1);
+
+ T_LOAD_NHWC_WITH_DILATION({{SRC_DATA_TYPE}}, 1, 1, N0, {{SRC_TENSOR_TYPE}}, {{src}}, bout, yi0, xi0, g_ind_0, {{src}}_w, {{src}}_h, 1, 1, false, {{dst}});
+)_";
+ }
+ else if(_attributes.interpolation_policy() == InterpolationPolicy::BILINEAR)
+ {
+ if(_attributes.sampling_policy() == SamplingPolicy::TOP_LEFT)
+ {
+ code += R"_(
+ float xi_f = (g_ind_1 * SCALE_X);
+ float yi_f = (yo * SCALE_Y);
+)_";
+ }
+ else
+ {
+ code += R"_(
+ float xi_f = ((g_ind_1 + 0.5f) * SCALE_X - 0.5f);
+ float yi_f = ((yo + 0.5f) * SCALE_Y - 0.5f);
+)_";
+ }
+
+ code += R"_(
+ const int xi = (int)floor(xi_f);
+ const int yi = (int)floor(yi_f);
+
+ TILE({{SRC_DATA_TYPE}}, 1, N0, in00);
+ TILE({{SRC_DATA_TYPE}}, 1, N0, in01);
+ TILE({{SRC_DATA_TYPE}}, 1, N0, in10);
+ TILE({{SRC_DATA_TYPE}}, 1, N0, in11);
+
+ in00[0].v = {{CONSTANT_VALUE}};
+ in01[0].v = {{CONSTANT_VALUE}};
+ in10[0].v = {{CONSTANT_VALUE}};
+ in11[0].v = {{CONSTANT_VALUE}};
+
+ const int xi0 = clamp(xi, 0, (int){{src}}_w - 1);
+ const int yi0 = clamp(yi, 0, (int){{src}}_h - 1);
+ const int xi1 = clamp(xi + 1, 0, (int){{src}}_w - 1);
+ const int yi1 = clamp(yi + 1, 0, (int){{src}}_h - 1);
+
+ T_LOAD_NHWC_WITH_DILATION({{SRC_DATA_TYPE}}, 1, 1, N0, {{SRC_TENSOR_TYPE}}, {{src}}, bout, yi0, xi0, g_ind_0, {{src}}_w, {{src}}_h, 1, 1, false, in00);
+ T_LOAD_NHWC_WITH_DILATION({{SRC_DATA_TYPE}}, 1, 1, N0, {{SRC_TENSOR_TYPE}}, {{src}}, bout, yi0, xi1, g_ind_0, {{src}}_w, {{src}}_h, 1, 1, false, in01);
+ T_LOAD_NHWC_WITH_DILATION({{SRC_DATA_TYPE}}, 1, 1, N0, {{SRC_TENSOR_TYPE}}, {{src}}, bout, yi1, xi0, g_ind_0, {{src}}_w, {{src}}_h, 1, 1, false, in10);
+ T_LOAD_NHWC_WITH_DILATION({{SRC_DATA_TYPE}}, 1, 1, N0, {{SRC_TENSOR_TYPE}}, {{src}}, bout, yi1, xi1, g_ind_0, {{src}}_w, {{src}}_h, 1, 1, false, in11);
+)_";
+
+ if(is_data_type_float(_src->data_type()))
+ {
+ code += R"_(
+ const {{SRC_DATA_TYPE}} a = ({{SRC_DATA_TYPE}})(xi_f - (float)xi);
+ const {{SRC_DATA_TYPE}} b = ({{SRC_DATA_TYPE}})(1.f - a);
+ const {{SRC_DATA_TYPE}} a1 = ({{SRC_DATA_TYPE}})(yi_f - (float)yi);
+ const {{SRC_DATA_TYPE}} b1 = ({{SRC_DATA_TYPE}})(1.f - a1);
+
+ // Calculate the output
+ {{dst}}[0].v = ((in00[0].v * b * b1) + (in01[0].v * a * b1) + (in10[0].v * b * a1) + (in11[0].v * a * a1));
+)_";
+ }
+ else
+ {
+ code += R"_(
+ TILE(float, 1, N0, out_f);
+ TILE(float, 1, N0, in00_f);
+ TILE(float, 1, N0, in01_f);
+ TILE(float, 1, N0, in10_f);
+ TILE(float, 1, N0, in11_f);
+
+ const float a = (xi_f - (float)xi);
+ const float b = (1.f - a);
+ const float a1 = (yi_f - (float)yi);
+ const float b1 = (1.f - a1);
+)_"
+ // Dequantize
+ R"_(
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ in00_f[0].s[n0] = ((float)in00[0].s[n0] - (float){{OFFSET}}) * (float){{SCALE}};
+ in01_f[0].s[n0] = ((float)in01[0].s[n0] - (float){{OFFSET}}) * (float){{SCALE}};
+ in10_f[0].s[n0] = ((float)in10[0].s[n0] - (float){{OFFSET}}) * (float){{SCALE}};
+ in11_f[0].s[n0] = ((float)in11[0].s[n0] - (float){{OFFSET}}) * (float){{SCALE}};
+ })
+)_"
+ // Calculate the output in the floating-point domain
+ R"_(
+ out_f[0].v = ((in00_f[0].v * b * b1) + (in01_f[0].v * a * b1) + (in10_f[0].v * b * a1) + (in11_f[0].v * a * a1));
+)_"
+ // Quantize
+ R"_(
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ {{dst}}[0].s[n0] = CONVERT_SAT(out_f[0].s[n0] / (float){{SCALE}} + (float){{OFFSET}}, {{DST_DATA_TYPE}});
+ })
+)_";
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unsupported interpolation policy");
+ }
+
+ code += R"_(
+ g_dst_indirect_y[0].v = g_ind_1 + (yo * (int)({{arg_dst}}_w)) + bout * (int)({{arg_dst}}_w * {{arg_dst}}_h);
+}
+//------------------ END KERNEL {{meta_kernel_id}} ---------------------
+)_";
+
+ return code;
+}
+
+void ClTemplateResize::declare_variables(GpuKernelVariableTable &vtable, const IGpuTemplateComponentWriter::ComponentGroup &comp_group) const
+{
+ vtable.declare_variable(
+ _src,
+ GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer),
+ comp_group.is_intermediate_tensor(_src),
+ "src");
+
+ vtable.declare_variable(
+ _dst,
+ GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer),
+ comp_group.is_intermediate_tensor(_dst),
+ "dst");
+}
+
+TagLUT ClTemplateResize::get_tag_lut(const GpuKernelVariableTable &vtable, const IGpuTemplateComponentWriter::ComponentGroup &comp_group) const
+{
+ TagLUT lut{};
+
+ // Arguments and global shared variables
+ lut["src"] = vtable.get_variable(_src);
+ lut["dst"] = vtable.get_variable(_dst);
+
+ const auto dst_argument = vtable.get_variable(comp_group.get_any_dst_tensor());
+ lut["arg_dst"] = dst_argument.uniq_name;
+
+ // Local build options
+ lut["meta_kernel_id"] = id();
+ lut["SRC_DATA_TYPE"] = get_cl_type_from_data_type(_src->data_type());
+ lut["SRC_TENSOR_TYPE"] = "BUFFER";
+ lut["DST_DATA_TYPE"] = get_cl_type_from_data_type(_dst->data_type());
+ lut["CONSTANT_VALUE"] = string_from_pixel_value(0, _src->data_type());
+
+ const bool is_qasymm_bilinear = is_data_type_quantized_asymmetric(_src->data_type())
+ && _attributes.interpolation_policy() == InterpolationPolicy::BILINEAR;
+
+ if(is_qasymm_bilinear)
+ {
+ const UniformQuantizationInfo qinfo = _src->quantization_info().uniform();
+ lut["SCALE"] = support::cpp11::to_string(qinfo.scale);
+ lut["OFFSET"] = support::cpp11::to_string(qinfo.offset);
+ }
+ else
+ {
+ lut["SCALE"] = support::cpp11::to_string(1);
+ lut["OFFSET"] = support::cpp11::to_string(0);
+ }
+
+ return lut;
+}
+
+CLBuildOptions ClTemplateResize::get_build_options(const IGpuTemplateComponentWriter::ComponentGroup &comp_group) const
+{
+ const Window root_window = comp_group.get_root_component()->template_writer()->get_window();
+ const unsigned int n0 = root_window.x().step();
+ const unsigned int m0 = root_window.y().step();
+ const unsigned int partial_n0 = _dst->dimension(0) % n0;
+
+ const float scale_x = scale_utils::calculate_resize_ratio(_src->dimension(1), _dst->dimension(1), _attributes.align_corners());
+ const float scale_y = scale_utils::calculate_resize_ratio(_src->dimension(2), _dst->dimension(2), _attributes.align_corners());
+
+ CLBuildOptions build_opts;
+
+ build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
+ build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
+ build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_n0));
+ build_opts.add_option("-DSCALE_X=" + float_to_string_with_full_precision(scale_x));
+ build_opts.add_option("-DSCALE_Y=" + float_to_string_with_full_precision(scale_y));
+
+ return build_opts;
+}
+
+std::string ClTemplateResize::get_config_id() const
+{
+ std::string config_id{};
+
+ config_id += "resize_";
+ config_id += (_attributes.interpolation_policy() == InterpolationPolicy::NEAREST_NEIGHBOR ? "NEAREST_NEIGHBOR" : "");
+ config_id += (_attributes.interpolation_policy() == InterpolationPolicy::BILINEAR ? "BILINEAR" : "");
+ config_id += "_";
+ config_id += (_attributes.sampling_policy() == SamplingPolicy::CENTER ? "center" : "topleft");
+ config_id += "_";
+ config_id += support::cpp11::to_string(_dst->dimension(0));
+ config_id += "_";
+ config_id += support::cpp11::to_string(_dst->dimension(1));
+ config_id += "_";
+ config_id += support::cpp11::to_string(_dst->dimension(2));
+ config_id += "_";
+ config_id += support::cpp11::to_string(_dst->dimension(3));
+
+ return config_id;
+}
+
+std::set<std::string> ClTemplateResize::get_headers_list() const
+{
+ return std::set<std::string>{ "helpers.h", "tile_helpers.h" };
+}
+
+Window ClTemplateResize::get_window() const
+{
+ ARM_COMPUTE_ERROR_ON_MSG(_dst->tensor_shape().total_size() == 0U, "Destination tensor is not initialized");
+
+ const unsigned int n0 = adjust_vec_size(16 / _src->element_size(), _src->dimension(0));
+ Window win = calculate_max_window(*_dst, Steps(n0));
+ return win.collapse(win, Window::DimZ);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
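Note: for reference, the bilinear blend emitted by get_component_code() above corresponds to the following scalar computation (an illustrative sketch only, not part of the patch); a/a1 are the horizontal and vertical interpolation fractions:

    // Scalar reference for the bilinear weights used in the generated kernel code above.
    float bilinear_blend(float in00, float in01, float in10, float in11,
                         float xi_f, int xi, float yi_f, int yi)
    {
        const float a  = xi_f - static_cast<float>(xi); // horizontal fraction in [0, 1)
        const float b  = 1.f - a;
        const float a1 = yi_f - static_cast<float>(yi); // vertical fraction in [0, 1)
        const float b1 = 1.f - a1;
        return (in00 * b * b1) + (in01 * a * b1) + (in10 * b * a1) + (in11 * a * a1);
    }

For quantized inputs the generated code dequantizes the four neighbours with (q - OFFSET) * SCALE, applies the same blend in the floating-point domain, then requantizes with value / SCALE + OFFSET, as shown in the template above.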
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.h b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.h
new file mode 100644
index 0000000000..4c69007185
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATERESIZE
+#define SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATERESIZE
+
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.h"
+#include "src/dynamic_fusion/sketch/gpu/template_writer/IGpuTemplateComponentWriter.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class ClTemplateResize final : public IGpuTemplateComponentWriter
+{
+public:
+ using Attributes = ClComponentResize::Attributes;
+
+ /** Constructor
+ *
+ * @param[in] id Component id
+ * @param[in] tensors Tensor arguments to the components
+ * @param[in] attributes Component attributes
+ */
+ ClTemplateResize(ComponentId id, const ArgumentPack<ITensorInfo> &tensors, const Attributes &attributes);
+
+ /** Destructor */
+ ~ClTemplateResize() override = default;
+
+ /** Prevent instances of this class from being copy constructed */
+ ClTemplateResize(const ClTemplateResize &resize) = delete;
+
+ /** Prevent instances of this class from being copied */
+ ClTemplateResize &operator=(const ClTemplateResize &resize) = delete;
+
+ /** Allow instances of this class to be move constructed */
+ ClTemplateResize(ClTemplateResize &&resize) = default;
+
+ /** Allow instances of this class to be moved */
+ ClTemplateResize &operator=(ClTemplateResize &&resize) = default;
+
+ /** Generate kernel component name */
+ std::string get_name() const override;
+
+ /** Generate kernel component code template
+ *
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return std::string Component code
+ */
+ std::string get_component_code(const ComponentGroup &comp_group) const override;
+
+ /** Declare all variables used by the component in the @p vtable
+ *
+ * @param[out] vtable Variable table
+ * @param[in] comp_group Component group of which the component is a part of
+ */
+ void declare_variables(GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override;
+
+ /** Generate the tag look-up table used to instantiate the component code.
+ *
+ * @param[in] vtable Variable table
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return TagLUT Tag lookup table
+ */
+ TagLUT get_tag_lut(const GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override;
+
+ /** Generate the build options used in the component
+ *
+ * @param[in] comp_group Component group of which the component is a part of
+ *
+ * @return CLBuildOptions Build options
+ */
+ CLBuildOptions get_build_options(const ComponentGroup &comp_group) const override;
+
+ /** Generate the component config id string used for tuning */
+ std::string get_config_id() const override;
+
+ /** Generate the header list used in the component */
+ std::set<std::string> get_headers_list() const override;
+
+ /** Generate the execution window for the component */
+ Window get_window() const override;
+
+private:
+ const ITensorInfo *_src;
+ const ITensorInfo *_dst;
+ Attributes _attributes;
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATERESIZE */
diff --git a/tests/datasets/ScaleValidationDataset.h b/tests/datasets/ScaleValidationDataset.h
index c6987c0908..8987c3a1c1 100644
--- a/tests/datasets/ScaleValidationDataset.h
+++ b/tests/datasets/ScaleValidationDataset.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SCALE_VALIDATION_DATASET
-#define ARM_COMPUTE_TEST_SCALE_VALIDATION_DATASET
+#ifndef TESTS_DATASETS_SCALEVALIDATIONDATASET
+#define TESTS_DATASETS_SCALEVALIDATIONDATASET
#include "arm_compute/core/Types.h"
#include "tests/datasets/BorderModeDataset.h"
@@ -173,6 +173,11 @@ framework::dataset::make("AlignCorners", { true }));
datasets::BorderModes()), \
samping_policy_set)
+#define ASSEMBLE_DATASET_DYNAMIC_FUSION(shape, samping_policy_set) \
+ combine(combine(combine((shape), framework::dataset::make("DataLayout", { DataLayout::NHWC })), \
+ ScaleInterpolationPolicySet), \
+ samping_policy_set)
+
#define ASSEMBLE_S8_DATASET(shape, samping_policy_set) \
combine(combine(combine(combine((shape), framework::dataset::make("DataLayout", DataLayout::NHWC)), \
framework::dataset::make("InterpolationPolicy", { InterpolationPolicy::BILINEAR })), \
@@ -194,6 +199,13 @@ framework::dataset::make("AlignCorners", { true }));
datasets::BorderModes()), \
sampling_policy_set)
+#define ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(shape, sampling_policy_set, quantization_info_set) \
+ combine(combine(combine(combine(shape, \
+ quantization_info_set), \
+ framework::dataset::make("DataLayout", { DataLayout::NHWC })), \
+ ScaleInterpolationPolicySet), \
+ sampling_policy_set)
+
/** Generating dataset for quantized data types with the given shapes */
#define ASSEMBLE_DIFFERENTLY_QUANTIZED_DATASET(shape, sampling_policy_set, input_quant_info_set, output_quant_info_set) \
combine(combine(combine(combine(combine(combine(shape, \
@@ -207,4 +219,4 @@ framework::dataset::make("AlignCorners", { true }));
} // namespace datasets
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SCALE_VALIDATION_DATASET */
+#endif /* TESTS_DATASETS_SCALEVALIDATIONDATASET */
diff --git a/tests/validation/dynamic_fusion/gpu/cl/Resize.cpp b/tests/validation/dynamic_fusion/gpu/cl/Resize.cpp
new file mode 100644
index 0000000000..17c341b803
--- /dev/null
+++ b/tests/validation/dynamic_fusion/gpu/cl/Resize.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ScaleValidationDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+using datasets::ScaleShapesBaseDataSet;
+using datasets::ScaleInterpolationPolicySet;
+using datasets::ScaleSamplingPolicySet;
+using datasets::ScaleAlignCornersSamplingPolicySet;
+
+/** We consider a vector size of 16 bytes, since the maximum size of
+ * a vector used by @ref CLScaleKernel is currently 16 bytes (float4).
+ */
+constexpr uint32_t vector_byte = 16;
+
+template <typename T>
+constexpr uint32_t num_elements_per_vector()
+{
+ return vector_byte / sizeof(T);
+}
+
+/** Quantization information data set */
+const auto QuantizationInfoSet = framework::dataset::make("QuantizationInfo",
+{
+ QuantizationInfo(0.5f, -1),
+});
+
+/** Tolerance */
+constexpr AbsoluteTolerance<uint8_t> tolerance_q8(1);
+constexpr AbsoluteTolerance<int8_t> tolerance_qs8(1);
+constexpr AbsoluteTolerance<int16_t> tolerance_s16(1);
+constexpr float tolerance_f32_absolute(0.001f);
+
+RelativeTolerance<float> tolerance_f32(0.05);
+constexpr float abs_tolerance_f16(0.1f);
+RelativeTolerance<half> tolerance_f16(half(0.1));
+
+constexpr float tolerance_num_f32(0.01f);
+
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(DYNAMIC_FUSION)
+TEST_SUITE(RESIZE)
+
+TEST_SUITE(Validate)
+
+const auto default_input_shape = TensorShape{ 2, 3, 3, 2 };
+const auto default_output_shape = TensorShape{ 4, 6, 3, 2 };
+
+constexpr auto default_data_type = DataType::U8;
+constexpr auto default_data_layout = DataLayout::NHWC;
+constexpr auto default_interpolation_policy = InterpolationPolicy::NEAREST_NEIGHBOR;
+constexpr bool default_use_padding = false;
+
+TEST_CASE(NullPtr, framework::DatasetMode::ALL)
+{
+ const TensorInfo input_info = TensorInfo{ default_input_shape, 1, default_data_type, default_data_layout };
+ const TensorInfo output_info = TensorInfo{ default_output_shape, 1, default_data_type, default_data_layout };
+
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ const TensorInfo sketch_input_info = sketch.create_tensor_info(input_info);
+ const TensorInfo sketch_output_info = sketch.create_tensor_info(output_info);
+
+ // nullptr is given as input
+ Status status = GpuResize::validate_op(sketch, nullptr, &sketch_output_info, ResizeAttributes());
+ ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
+
+ // nullptr is given as output
+ status = GpuResize::validate_op(sketch, &sketch_input_info, nullptr, ResizeAttributes());
+ ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
+}
+
+TEST_CASE(SupportDataType, framework::DatasetMode::ALL)
+{
+ const std::map<DataType, bool> supported_data_types =
+ {
+ { DataType::U8, true },
+ { DataType::S8, false },
+ { DataType::QSYMM8, false },
+ { DataType::QASYMM8, true },
+ { DataType::QASYMM8_SIGNED, true },
+ { DataType::QSYMM8_PER_CHANNEL, false },
+ { DataType::U16, false },
+ { DataType::S16, true },
+ { DataType::QSYMM16, false },
+ { DataType::QASYMM16, false },
+ { DataType::U32, false },
+ { DataType::S32, false },
+ { DataType::U64, false },
+ { DataType::S64, false },
+ { DataType::BFLOAT16, false },
+ { DataType::F16, true },
+ { DataType::F32, true },
+ { DataType::F64, false },
+ { DataType::SIZET, false },
+ };
+
+ for(auto &kv : supported_data_types)
+ {
+ const TensorInfo input_info = TensorInfo{ default_input_shape, 1, kv.first, default_data_layout };
+ const TensorInfo output_info = TensorInfo{ default_output_shape, 1, kv.first, default_data_layout };
+
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ const TensorInfo sketch_input_info = sketch.create_tensor_info(input_info);
+ const TensorInfo sketch_output_info = sketch.create_tensor_info(output_info);
+
+ // Validate the operator for the data type under test
+ Status status = GpuResize::validate_op(sketch, &sketch_input_info, &sketch_output_info, ResizeAttributes());
+ ARM_COMPUTE_EXPECT(bool(status) == kv.second, framework::LogLevel::ERRORS);
+ }
+}
+
+TEST_CASE(MismatchingDataType, framework::DatasetMode::ALL)
+{
+ constexpr DataType non_default_data_type = DataType::F32;
+
+ const TensorInfo input_info = TensorInfo{ default_input_shape, 1, default_data_type, default_data_layout };
+ const TensorInfo output_info = TensorInfo{ default_output_shape, 1, non_default_data_type, default_data_layout };
+
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ const TensorInfo sketch_input_info = sketch.create_tensor_info(input_info);
+ const TensorInfo sketch_output_info = sketch.create_tensor_info(output_info);
+
+ Status status = GpuResize::validate_op(sketch, &sketch_input_info, &sketch_output_info, ResizeAttributes());
+ ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
+}
+
+TEST_CASE(AlignedCornerNotSupported, framework::DatasetMode::ALL)
+{
+ // Aligned corners require sampling policy to be TOP_LEFT.
+ constexpr InterpolationPolicy interpolation_policy = InterpolationPolicy::BILINEAR;
+ constexpr bool align_corners = true;
+ constexpr SamplingPolicy sampling_policy = SamplingPolicy::CENTER;
+
+ const TensorInfo input_info = TensorInfo{ default_input_shape, 1, default_data_type, default_data_layout };
+ const TensorInfo output_info = TensorInfo{ default_output_shape, 1, default_data_type, default_data_layout };
+
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ const TensorInfo sketch_input_info = sketch.create_tensor_info(input_info);
+ const TensorInfo sketch_output_info = sketch.create_tensor_info(output_info);
+
+ ResizeAttributes attributes{};
+ attributes.interpolation_policy(interpolation_policy)
+ .sampling_policy(sampling_policy)
+ .align_corners(align_corners);
+
+ Status status = GpuResize::validate_op(sketch, &sketch_input_info, &sketch_output_info, attributes);
+ ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
+}
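+
+// Illustrative only (not exercised by a test case here): the failure above stems from the
+// CENTER sampling policy; an aligned-corners request is expected to pass this check only
+// when SamplingPolicy::TOP_LEFT is used instead, e.g.
+//   ResizeAttributes valid_attributes{};
+//   valid_attributes.interpolation_policy(InterpolationPolicy::BILINEAR)
+//                   .sampling_policy(SamplingPolicy::TOP_LEFT)
+//                   .align_corners(true);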
+
+TEST_CASE(UnsupportedInterpolationPolicy, framework::DatasetMode::ALL)
+{
+ const TensorInfo input_info = TensorInfo{ TensorShape(28U, 33U, 2U), 1, DataType::F32, default_data_layout };
+ const TensorInfo output_info = TensorInfo{ TensorShape(26U, 21U, 2U), 1, DataType::F32, default_data_layout };
+ constexpr auto interpolation_policy = InterpolationPolicy::AREA;
+
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ const TensorInfo sketch_input_info = sketch.create_tensor_info(input_info);
+ const TensorInfo sketch_output_info = sketch.create_tensor_info(output_info);
+
+ ResizeAttributes attributes{};
+ attributes.interpolation_policy(interpolation_policy);
+
+ Status status = GpuResize::validate_op(sketch, &sketch_input_info, &sketch_output_info, attributes);
+ ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
+}
+
+TEST_CASE(UnsupportedLayout, framework::DatasetMode::ALL)
+{
+ const TensorInfo input_info = TensorInfo{ default_input_shape, 1, default_data_type, DataLayout::NCHW };
+ const TensorInfo output_info = TensorInfo{ default_output_shape, 1, default_data_type, DataLayout::NCHW };
+ constexpr auto interpolation_policy = InterpolationPolicy::BILINEAR;
+
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ const TensorInfo sketch_input_info = sketch.create_tensor_info(input_info);
+ const TensorInfo sketch_output_info = sketch.create_tensor_info(output_info);
+
+ ResizeAttributes attributes{};
+ attributes.interpolation_policy(interpolation_policy);
+
+ Status status = GpuResize::validate_op(sketch, &sketch_input_info, &sketch_output_info, attributes);
+ ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
+}
+
+TEST_SUITE_END() // Validate
+
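+// The fixture (see ResizeFixture.h in this patch) builds a GpuResize + GpuOutput sketch,
+// runs it through ClWorkloadRuntime and validates the result against the reference scale
+// implementation.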
+template <typename T>
+using DynamicFusionResizeFixture = DynamicFusionResizeValidationFixture<CLTensor, CLAccessor, GpuResize, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+
+const auto f32_shape = combine((SCALE_PRECOMMIT_SHAPE_DATASET(num_elements_per_vector<float>())), framework::dataset::make("DataType", DataType::F32));
+
+FIXTURE_DATA_TEST_CASE(Run, DynamicFusionResizeFixture<float>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(f32_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute);
+}
+
+FIXTURE_DATA_TEST_CASE(RunAlignCorners, DynamicFusionResizeFixture<float>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(f32_shape, ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute);
+}
+const auto f32_nightly_shape = combine((SCALE_NIGHTLY_SHAPE_DATASET(num_elements_per_vector<float>())), framework::dataset::make("DataType", DataType::F32));
+FIXTURE_DATA_TEST_CASE(RunNightly, DynamicFusionResizeFixture<float>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(f32_nightly_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute);
+}
+FIXTURE_DATA_TEST_CASE(RunNightlyAlignCorners, DynamicFusionResizeFixture<float>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(f32_nightly_shape,
+ ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute);
+}
+TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+const auto f16_shape = combine((SCALE_PRECOMMIT_SHAPE_DATASET(num_elements_per_vector<half>())), framework::dataset::make("DataType", DataType::F16));
+FIXTURE_DATA_TEST_CASE(Run, DynamicFusionResizeFixture<half>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(f16_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunAlignCorners, DynamicFusionResizeFixture<half>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(f16_shape, ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+}
+const auto f16_nightly_shape = combine((SCALE_NIGHTLY_SHAPE_DATASET(num_elements_per_vector<half>())), framework::dataset::make("DataType", DataType::F16));
+FIXTURE_DATA_TEST_CASE(RunNightly, DynamicFusionResizeFixture<half>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(f16_nightly_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunNightlyAlignCorners, DynamicFusionResizeFixture<half>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(f16_nightly_shape,
+ ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f16, 0.0f, abs_tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Float
+
+TEST_SUITE(Integer)
+TEST_SUITE(U8)
+const auto u8_shape = combine((SCALE_PRECOMMIT_SHAPE_DATASET(num_elements_per_vector<uint8_t>())), framework::dataset::make("DataType", DataType::U8));
+FIXTURE_DATA_TEST_CASE(Run, DynamicFusionResizeFixture<uint8_t>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(u8_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+FIXTURE_DATA_TEST_CASE(RunAlignCorners, DynamicFusionResizeFixture<uint8_t>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(u8_shape, ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+const auto u8_nightly_shape = combine((SCALE_NIGHTLY_SHAPE_DATASET(num_elements_per_vector<uint8_t>())), framework::dataset::make("DataType", DataType::U8));
+FIXTURE_DATA_TEST_CASE(RunNightly, DynamicFusionResizeFixture<uint8_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(u8_nightly_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+FIXTURE_DATA_TEST_CASE(RunNightlyAlignCorners, DynamicFusionResizeFixture<uint8_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(u8_nightly_shape,
+ ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+TEST_SUITE_END() // U8
+
+TEST_SUITE(S16)
+const auto s16_shape = combine((SCALE_PRECOMMIT_SHAPE_DATASET(num_elements_per_vector<int16_t>())), framework::dataset::make("DataType", DataType::S16));
+FIXTURE_DATA_TEST_CASE(Run, DynamicFusionResizeFixture<int16_t>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(s16_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_s16);
+}
+FIXTURE_DATA_TEST_CASE(RunAlignCorners, DynamicFusionResizeFixture<int16_t>, framework::DatasetMode::ALL, ASSEMBLE_DATASET_DYNAMIC_FUSION(s16_shape, ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_s16);
+}
+const auto s16_nightly_shape = combine((SCALE_NIGHTLY_SHAPE_DATASET(num_elements_per_vector<int16_t>())), framework::dataset::make("DataType", DataType::S16));
+FIXTURE_DATA_TEST_CASE(RunNightly, DynamicFusionResizeFixture<int16_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(s16_nightly_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_s16);
+}
+FIXTURE_DATA_TEST_CASE(RunNightlyAlignCorners, DynamicFusionResizeFixture<int16_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_DATASET_DYNAMIC_FUSION(s16_nightly_shape,
+ ScaleAlignCornersSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_s16);
+}
+TEST_SUITE_END() // S16
+TEST_SUITE_END() // Integer
+
+template <typename T>
+using DynamicFusionResizeQuantizedFixture = DynamicFusionResizeQuantizedValidationFixture<CLTensor, CLAccessor, GpuResize, T>;
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+const auto qasymm8_shape = combine((SCALE_PRECOMMIT_SHAPE_DATASET(num_elements_per_vector<uint8_t>())), framework::dataset::make("DataType", DataType::QASYMM8));
+FIXTURE_DATA_TEST_CASE(Run, DynamicFusionResizeQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_shape, ScaleSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+FIXTURE_DATA_TEST_CASE(RunAlignCorners, DynamicFusionResizeQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_shape,
+ ScaleAlignCornersSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+const auto qasymm8_nightly_shape = combine((SCALE_NIGHTLY_SHAPE_DATASET(num_elements_per_vector<uint8_t>())), framework::dataset::make("DataType", DataType::QASYMM8));
+FIXTURE_DATA_TEST_CASE(RunNightly, DynamicFusionResizeQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_nightly_shape,
+ ScaleSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+FIXTURE_DATA_TEST_CASE(RunNightlyAlignCorners, DynamicFusionResizeQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_nightly_shape,
+ ScaleAlignCornersSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_q8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+const auto qasymm8_signed_shape = combine((SCALE_PRECOMMIT_SHAPE_DATASET(num_elements_per_vector<int8_t>())), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED));
+FIXTURE_DATA_TEST_CASE(Run, DynamicFusionResizeQuantizedFixture<int8_t>, framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_signed_shape, ScaleSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_qs8);
+}
+FIXTURE_DATA_TEST_CASE(RunAlignCorners, DynamicFusionResizeQuantizedFixture<int8_t>, framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_signed_shape,
+ ScaleAlignCornersSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_qs8);
+}
+const auto qasymm8_signed_nightly_shape = combine((SCALE_NIGHTLY_SHAPE_DATASET(num_elements_per_vector<int8_t>())), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED));
+FIXTURE_DATA_TEST_CASE(RunNightly, DynamicFusionResizeQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_signed_nightly_shape,
+ ScaleSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_qs8);
+}
+FIXTURE_DATA_TEST_CASE(RunNightlyAlignCorners, DynamicFusionResizeQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY, ASSEMBLE_QUANTIZED_DATASET_DYNAMIC_FUSION(qasymm8_signed_nightly_shape,
+ ScaleAlignCornersSamplingPolicySet,
+ QuantizationInfoSet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _interpolation_policy, _sampling_policy, false);
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_qs8);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
+
+TEST_SUITE_END() // RESIZE
+TEST_SUITE_END() // DYNAMIC_FUSION
+TEST_SUITE_END() // CL
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h
new file mode 100644
index 0000000000..5cdf52e62b
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE
+#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/SimpleTensor.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/Permute.h"
+#include "tests/validation/reference/Scale.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionResizeGenericValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout,
+ InterpolationPolicy interpolation_policy, SamplingPolicy sampling_policy,
+ bool align_corners, QuantizationInfo output_quantization_info)
+ {
+ _shape = shape;
+ _interpolation_policy = interpolation_policy;
+ _sampling_policy = sampling_policy;
+ _data_type = data_type;
+ _input_quantization_info = quantization_info;
+ _output_quantization_info = output_quantization_info;
+ _align_corners = align_corners;
+ _data_layout = data_layout;
+
+ ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); // Dynamic fusion resize supports only NHWC layout
+
+ generate_scale(shape);
+
+ _target = compute_target(shape);
+ _reference = compute_reference(shape);
+ }
+
+protected:
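+    // Pick a random output size by scaling the input width/height with a factor drawn from
+    // [0.25, 3] and clamping the result to the valid output range.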
+ void generate_scale(const TensorShape &shape)
+ {
+ static constexpr float _min_scale{ 0.25f };
+ static constexpr float _max_scale{ 3.f };
+
+ constexpr float max_width{ 8192.0f };
+ constexpr float max_height{ 6384.0f };
+ constexpr float min_width{ 1.f };
+ constexpr float min_height{ 1.f };
+
+ std::mt19937 generator(library->seed());
+ std::uniform_real_distribution<float> distribution_float(_min_scale, _max_scale);
+
+ auto generate = [&](size_t input_size, float min_output, float max_output) -> int
+ {
+ const float generated_scale = distribution_float(generator);
+ const int output_size = static_cast<int>(utility::clamp(static_cast<float>(input_size) * generated_scale, min_output, max_output));
+ return output_size;
+ };
+
+        // Input shapes are always given in NCHW layout; the NHWC case is handled by the permute in compute_target()
+ const int idx_width = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::HEIGHT);
+
+ _output_width = generate(shape[idx_width], min_width, max_width);
+ _output_height = generate(shape[idx_height], min_height, max_height);
+ }
+
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ if(tensor.data_type() == DataType::F32)
+ {
+ std::uniform_real_distribution<float> distribution(-5.0f, 5.0f);
+ library->fill(tensor, distribution, 0);
+ }
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -5.0f, 5.0f };
+ library->fill(tensor, distribution, 0);
+ }
+ else if(is_data_type_quantized(tensor.data_type()))
+ {
+ std::uniform_int_distribution<> distribution(0, 100);
+ library->fill(tensor, distribution, 0);
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+ }
+
+ TensorType compute_target(TensorShape shape)
+ {
+        // Test shapes are given in NCHW data layout, hence the permutation to NHWC
+ permute(shape, PermutationVector(2U, 0U, 1U));
+
+ // Create a new workload sketch
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &gpu_ctx };
+
+ // Create sketch tensors
+ TensorInfo src_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type, _data_layout));
+ src_info.set_quantization_info(_input_quantization_info);
+ TensorInfo dst_info = sketch.create_tensor_info();
+
+ ResizeAttributes attributes;
+ attributes.align_corners(_align_corners).sampling_policy(_sampling_policy).interpolation_policy(_interpolation_policy).output_width(_output_width).output_height(_output_height);
+
+ TensorInfo scale_result_info = sketch.create_tensor_info();
+
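+        // Compose the workload: GpuResize writes into an intermediate sketch tensor and
+        // GpuOutput maps that result onto the user-visible destination. The destination
+        // infos are created empty above and are initialised during operator creation.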
+ FunctionType::create_op(sketch, &src_info, &scale_result_info, attributes);
+ GpuOutput::create_op(sketch, &scale_result_info, &dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for(auto &data : runtime.get_auxiliary_tensors())
+ {
+ auto tensor = data.first;
+ const auto aux_mem_req = data.second;
+ tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
+ tensor->allocator()->allocate();
+ }
+
+ // Construct user tensors
+ TensorType t_src{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_src.allocator()->init(src_info);
+ t_dst.allocator()->init(dst_info);
+
+ // Allocate and fill user tensors
+ t_src.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_src));
+
+ // Run runtime
+ runtime.run({ &t_src, &t_dst });
+
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape)
+ {
+ // Create reference
+ SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info };
+
+ // Reference code is NCHW, so the input shapes are NCHW
+ const int idx_width = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::HEIGHT);
+
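+        // The reference scale implementation takes scale ratios rather than absolute output
+        // dimensions, so derive them from the generated output size.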
+ const float scale_x = static_cast<float>(_output_width) / shape[idx_width];
+ const float scale_y = static_cast<float>(_output_height) / shape[idx_height];
+
+ // Fill reference
+ fill(src);
+
+ return reference::scale<T>(src, scale_x, scale_y, _interpolation_policy,
+ BorderMode::REPLICATE, static_cast<T>(0), _sampling_policy, /* ceil_policy_scale */ false,
+ _align_corners, _output_quantization_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ TensorShape _shape{};
+ InterpolationPolicy _interpolation_policy{};
+ SamplingPolicy _sampling_policy{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
+ bool _align_corners{ false };
+ int _output_width{ 0 };
+ int _output_height{ 0 };
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionResizeValidationFixture : public DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type, DataLayout data_layout, InterpolationPolicy policy, SamplingPolicy sampling_policy, bool align_corners)
+ {
+ DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
+ data_type,
+ QuantizationInfo(),
+ data_layout,
+ policy,
+ sampling_policy,
+ align_corners,
+ QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
+class DynamicFusionResizeQuantizedValidationFixture : public DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, SamplingPolicy sampling_policy,
+ bool align_corners)
+ {
+ DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
+ data_type,
+ quantization_info,
+ data_layout,
+ policy,
+ sampling_policy,
+ align_corners,
+ quantization_info);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE */
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index d4265bfdbd..d056bfbcf4 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -42,6 +42,7 @@
#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/ClampAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h"
#include "arm_compute/runtime/CL/CLTunerTypes.h"
#include "arm_compute/runtime/CL/CLTypes.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
@@ -3524,6 +3525,38 @@ inline std::string to_string(const experimental::dynamic_fusion::ClampAttributes
return str.str();
}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ResizeAttributes type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] resize_attr arm_compute::experimental::dynamic_fusion::ResizeAttributes type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const experimental::dynamic_fusion::ResizeAttributes &resize_attr)
+{
+ os << "ResizeAttributes="
+ << "["
+ << "AlignCorners=" << resize_attr.align_corners() << ", "
+ << "InterpolationPolicy=" << resize_attr.interpolation_policy() << ", "
+ << "OutputHeight=" << resize_attr.output_height() << ", "
+ << "OutputWidth=" << resize_attr.output_width() << ", "
+ << "SamplingPolicy=" << resize_attr.sampling_policy() << "]";
+ return os;
+}
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ResizeAttributes type.
+ *
+ * @param[in] resize_attr arm_compute::experimental::dynamic_fusion::ResizeAttributes type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const experimental::dynamic_fusion::ResizeAttributes &resize_attr)
+{
+ std::stringstream str;
+ str << resize_attr;
+ return str.str();
+}
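+
+// Usage sketch (illustrative, assuming the fluent setters of ResizeAttributes shown in the tests):
+//   experimental::dynamic_fusion::ResizeAttributes attr{};
+//   attr.output_width(16).output_height(8).interpolation_policy(InterpolationPolicy::BILINEAR);
+//   std::cout << to_string(attr); // e.g. "ResizeAttributes=[AlignCorners=0, InterpolationPolicy=BILINEAR, ...]"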
+
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TYPE_PRINTER_H__ */