Diffstat:
-rw-r--r--  Android.bp                                                              |   1
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h             |  28
-rw-r--r--  compute_kernel_writer/prototype/include/ckw/types/Operators.h           |   1
-rw-r--r--  compute_kernel_writer/prototype/src/Prototype.h                          |   2
-rw-r--r--  compute_kernel_writer/prototype/src/TileOperand.cpp                      |   3
-rw-r--r--  docs/user_guide/release_version_and_change_log.dox                       |   2
-rw-r--r--  filelist.json                                                            |   1
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.cpp    | 345
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.h      |  76
-rw-r--r--  src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.cpp       |  20
-rw-r--r--  src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.h         |  22
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp                   |  60
-rw-r--r--  tests/validation/dynamic_fusion/gpu/cl/Pool2d.cpp                        |   6
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h          |  10
14 files changed, 518 insertions(+), 59 deletions(-)
diff --git a/Android.bp b/Android.bp
index 871dacf321..b8a4fcd590 100644
--- a/Android.bp
+++ b/Android.bp
@@ -647,6 +647,7 @@ cc_library_static {
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwCast.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.cpp",
+ "src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwResize.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.cpp",
"src/dynamic_fusion/sketch/gpu/components/cl/ClComponentActivation.cpp",
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h
index 6e1bcdbbfd..65a092c0a2 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUPOOL2D
-#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUPOOL2D
+#ifndef ACL_ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUPOOL2D_H
+#define ACL_ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUPOOL2D_H
#include "arm_compute/core/Error.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
@@ -83,27 +83,22 @@ public:
*
* @param[in,out] sketch Workload sketch into which the operator will be fused
* @param[in] src Source tensor
- * @param[out] dst Destination tensor
* @param[in] attributes Operator attributes
* @param[in] settings Operator settings
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *dst,
- const Attributes &attributes,
- const Settings &settings);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ const Attributes &attributes,
+ const Settings &settings);
/** Check if the operator configuration is supported, irrespective of fusion
*
- * @param[in] context Workload context within which the operator is running
- * @param[in] src Left hand side tensor info. Data types supported: F16/F32.
- * @param[out] dst Destination tensor info. Data types supported: F16/F32.
- * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
- * @param[in] attributes Operator attributes
- * @param[in] settings Operator settings
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] src Left hand side tensor info. Data types supported: F16/F32.
+ * @param[in] attributes Operator attributes
+ * @param[in] settings Operator settings
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes,
const Settings &settings);
/** Validate the operator and check if it can be fused into the workload sketch.
@@ -111,11 +106,10 @@ public:
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes,
const Settings &settings);
};
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
-#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUPOOL2D */
+#endif // ACL_ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUPOOL2D_H
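Note: with this change GpuPool2d no longer takes a destination tensor; create_op() returns the destination ITensorInfo, which the caller binds to a concrete output via GpuOutput (see the updated Pool2dFixture.h later in this diff). A minimal usage sketch in that style follows; the context/sketch setup, tensor infos, attributes and settings are assumed and are not part of this change:

    // Illustrative only; follows the pattern of the updated Pool2dFixture.h.
    GpuWorkloadSketch sketch{ &context };

    // create_op returns the (auto-initialized) virtual destination info.
    ITensorInfo *ans_info = GpuPool2d::create_op(sketch, &src_info, attributes, settings);

    // Bind the virtual destination to a real output tensor info.
    GpuOutput::create_op(sketch, ans_info, &dst_info);

    // Validation mirrors the new signature (no dst argument).
    const Status status = GpuPool2d::validate_op(sketch, &src_info, attributes, settings);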
diff --git a/compute_kernel_writer/prototype/include/ckw/types/Operators.h b/compute_kernel_writer/prototype/include/ckw/types/Operators.h
index 14a88c91b4..43241170a5 100644
--- a/compute_kernel_writer/prototype/include/ckw/types/Operators.h
+++ b/compute_kernel_writer/prototype/include/ckw/types/Operators.h
@@ -34,6 +34,7 @@ enum class UnaryOp : int32_t
{
LogicalNot = 0x0000, // !
BitwiseNot = 0x0001, // ~
+ Negate = 0x0002, // -
};
/* Binary operations
diff --git a/compute_kernel_writer/prototype/src/Prototype.h b/compute_kernel_writer/prototype/src/Prototype.h
index 88d6e898e4..eb9d7198a9 100644
--- a/compute_kernel_writer/prototype/src/Prototype.h
+++ b/compute_kernel_writer/prototype/src/Prototype.h
@@ -1583,6 +1583,8 @@ inline std::string to_string(UnaryOp op)
return "!";
case UnaryOp::BitwiseNot:
return "~";
+ case UnaryOp::Negate:
+ return "-";
default:
assert(false);
return "";
diff --git a/compute_kernel_writer/prototype/src/TileOperand.cpp b/compute_kernel_writer/prototype/src/TileOperand.cpp
index bf6a15b9df..0eb2ca6a64 100644
--- a/compute_kernel_writer/prototype/src/TileOperand.cpp
+++ b/compute_kernel_writer/prototype/src/TileOperand.cpp
@@ -85,6 +85,9 @@ prototype::Operand TileOperand::create_impl_operand(prototype::IGpuKernelWriter
case DataType::Fp32:
return prototype::Operand(_value[0][0], prototype::OperandType::ScalarFp32);
+ case DataType::Fp16:
+ return prototype::Operand(_value[0][0], prototype::OperandType::ScalarFp16);
+
default:
CKW_ASSERT(false);
}
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index 3cb1ac4835..04ee10b9f4 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -50,6 +50,8 @@ v23.11 Public major release
- Update OpenCL™ API headers to v2023.04.17.
- Performance optimizations:
- Optimize @ref cpu::CpuReshape
+ - Port the following kernels in the experimental Dynamic Fusion interface to use the new Compute Kernel Writer interface with support for FP16/FP32 only:
+ - @ref experimental::dynamic_fusion::GpuCkwPool2d
v23.08 Public major release
- Deprecate the legacy 'libarm_compute_core' library. This library is an artifact of Compute Library's legacy library architecture and no longer serves any purpose.
diff --git a/filelist.json b/filelist.json
index 22d5cba50b..668feb92cf 100644
--- a/filelist.json
+++ b/filelist.json
@@ -2353,6 +2353,7 @@
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwCast.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwDirectConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.cpp",
+ "src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwResize.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.cpp",
"src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwKernelWriter.cpp",
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.cpp b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.cpp
new file mode 100644
index 0000000000..9c9a298132
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.cpp
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "ckw/TensorTileSampler.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuKernelArgument.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGroup.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwKernelWriter.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwScopedKernelWriter.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/WriterHelper.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h"
+
+using namespace ckw;
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+GpuCkwPool2d::GpuCkwPool2d(ComponentId id,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings)
+ : IGpuCkwComponentDriver{ id, tensors },
+ _src{},
+ _dst{},
+ _attributes{ attributes },
+ _settings{ settings }
+
+{
+ _src = this->tensors().get_const_tensor(TensorType::ACL_SRC_0);
+ _dst = this->tensors().get_const_tensor(TensorType::ACL_DST_0);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+}
+
+void GpuCkwPool2d::write_component_code(const ComponentGroup &comp_group, GpuCkwVariableTable &vtable, GpuCkwScopedKernelWriter writer) const
+{
+ const auto root_window = comp_group.get_root_component()->ckw_component_driver()->get_window();
+ const unsigned int n0 = root_window.x().step();
+ const unsigned int m0 = root_window.y().step();
+
+ GpuCkwComponentArgument *src = vtable.declare_variable(comp_group, writer, _src, TensorStorageType::ClBufferUint8Ptr, "src");
+ GpuCkwComponentArgument *dst = vtable.declare_variable(comp_group, writer, _dst, TensorStorageType::ClBufferUint8Ptr, "dst");
+
+ TileOperand &gid_0 = writer->declare_tile("gid_0", ckw::DataType::Int32);
+ TileOperand &gid_1 = writer->declare_tile("gid_1", ckw::DataType::Int32);
+ TileOperand &gid_2 = writer->declare_tile("gid_2", ckw::DataType::Int32);
+
+ writer->op_get_global_id(gid_0, 0);
+ writer->op_get_global_id(gid_1, 1);
+ writer->op_get_global_id(gid_2, 2);
+
+ // Data Layout is NHWC
+ constexpr int width_idx = 1;
+ constexpr int height_idx = 2;
+
+ const int32_t pool_size_x = static_cast<int32_t>(_attributes.pool_size().x());
+ const int32_t pool_size_y = static_cast<int32_t>(_attributes.pool_size().y());
+ const int32_t pad_x = static_cast<int32_t>(_attributes.pad().left);
+ const int32_t pad_y = static_cast<int32_t>(_attributes.pad().top);
+ const int32_t src_width = static_cast<int32_t>(_src->dimension(width_idx));
+ const int32_t src_height = static_cast<int32_t>(_src->dimension(height_idx));
+ const auto src_data_type = _src->data_type();
+
+ // Check if this is the global pooling path
+ const bool is_global_pooling = (pool_size_x == src_width) && (pool_size_y == src_height) && (pad_x == 0) && (pad_y == 0);
+ // Check if this is a case of FP_MIXED_PRECISION
+ const bool use_fp_mixed_precision = (src_data_type == DataType::F16) && _settings.mixed_precision() && _attributes.pool_type() != PoolingType::MAX;
+ const auto acc_data_type = (use_fp_mixed_precision) ? (DataType::F32) : (src_data_type);
+
+ TileOperand &const_0 = writer->declare_tile("0", 0);
+ const TileOperand &const_1 = writer->declare_tile("1", 1);
+ const TileOperand &const_lowest_value = writer->declare_tile("LOWEST_VALUE", std::numeric_limits<float>::lowest());
+ const TileOperand &pool_size_x_tile = writer->declare_tile("POOL_SIZE_X", pool_size_x);
+ const TileOperand &pool_size_y_tile = writer->declare_tile("POOL_SIZE_Y", pool_size_y);
+ const TileOperand &stride_x_tile = writer->declare_tile("STRIDE_X", static_cast<int32_t>(_attributes.stride().x()));
+ const TileOperand &stride_y_tile = writer->declare_tile("STRIDE_Y", static_cast<int32_t>(_attributes.stride().y()));
+ const TileOperand &pad_x_tile = writer->declare_tile("PAD_X", pad_x);
+ const TileOperand &pad_y_tile = writer->declare_tile("PAD_Y", pad_y);
+ const TileOperand &dst_height_tile = writer->declare_tile("DST_HEIGHT", static_cast<int32_t>(_dst->dimension(height_idx)));
+ const TileOperand &src_height_tile = writer->declare_tile("SRC_HEIGHT", src_height);
+ const TileOperand &src_width_tile = writer->declare_tile("SRC_WIDTH", src_width);
+
+ TileOperand &idx_out_n = writer->declare_tile("idx_out_n", ckw::DataType::Int32);
+ TileOperand &idx_out_h = writer->declare_tile("idx_out_h", ckw::DataType::Int32);
+ TileOperand &idx_out_w = writer->declare_tile("idx_out_w", ckw::DataType::Int32);
+ TileOperand &idx_out_c = writer->declare_tile("idx_out_c", ckw::DataType::Int32);
+
+ const int32_t dst_partial_n0_v = _dst->tensor_shape()[0] % n0;
+
+ get_coord(writer, idx_out_c, gid_0, n0, dst_partial_n0_v, "dst_x_", const_0);
+ get_coord(writer, idx_out_w, gid_1, 1, 0, "dst_y_", const_0);
+
+ writer->op_binary_expression(idx_out_h, gid_2, BinaryOp::Mod, dst_height_tile); // gid_2 % h
+ writer->op_binary_expression(idx_out_n, gid_2, BinaryOp::Div, dst_height_tile); // gid_2 / h
+
+ TensorTileSampler src_sampler;
+ src_sampler.width(n0);
+ src_sampler.height(m0);
+ src_sampler.format(TensorSamplerFormat::C_W_H);
+ src_sampler.address_mode_x(TensorSamplerAddressModeX::None);
+ src_sampler.address_mode_y(TensorSamplerAddressModeY::None);
+ src_sampler.address_mode_z(TensorSamplerAddressModeZ::None);
+ src_sampler.x(idx_out_c);
+ src_sampler.b(idx_out_n);
+
+ TensorTileSampler dst_sampler;
+ dst_sampler.width(n0);
+ dst_sampler.height(m0);
+ dst_sampler.format(TensorSamplerFormat::C_W_H);
+ dst_sampler.address_mode_x(TensorSamplerAddressModeX::OverlappingMin);
+ dst_sampler.address_mode_y(TensorSamplerAddressModeY::None);
+ dst_sampler.address_mode_z(TensorSamplerAddressModeZ::None);
+ dst_sampler.x(idx_out_c);
+ dst_sampler.y(idx_out_w);
+ dst_sampler.z(idx_out_h);
+ dst_sampler.b(idx_out_n);
+
+ // Prepare dst tensor and tile
+ TileInfo dst_tile_info = TileInfo(to_ckw(src_data_type), m0, n0);
+ if(!dst->has_tile())
+ {
+ TileOperand &dst_tile = writer->declare_tile("dst_tile", dst_tile_info);
+ dst->init_virtual_tensor(dst_tile, dst_sampler);
+ }
+ const TileOperand &dst_tile = dst->tile();
+
+ // A tile used to temporarily store results or as an accumulator in case of AVG and L2 pooling.
+ const TileOperand &res_tile = writer->declare_tile("res_tile", TileInfo(to_ckw(acc_data_type), m0, n0));
+
+ // Initialise result tile with appropriate value
+ if(_attributes.pool_type() == PoolingType::MAX)
+ {
+ if(_settings.use_inf_as_limit())
+ {
+ TileContainer minus_inf_tile_container;
+ std::vector<std::string> value = std::vector<std::string>(n0, "(-INFINITY)");
+ minus_inf_tile_container.push_back({ value });
+ const TileOperand &minus_inf = writer->declare_tile("minus_inf_const", minus_inf_tile_container, to_ckw(acc_data_type));
+ writer->op_assign(res_tile, minus_inf);
+ }
+ else
+ {
+ writer->op_assign(res_tile, const_lowest_value);
+ }
+ }
+ else
+ {
+ writer->op_assign(res_tile, const_0);
+ }
+
+ // idx_in_w = idx_out_w * STRIDE_X - PAD_X
+ TileOperand &idx_in_w = writer->declare_tile("idx_in_w", ckw::DataType::Int32);
+ writer->op_binary_expression(idx_in_w, idx_out_w, BinaryOp::Mul, stride_x_tile);
+ writer->op_binary_expression(idx_in_w, idx_in_w, BinaryOp::Sub, pad_x_tile);
+
+ // idx_in_h = idx_out_h * STRIDE_Y - PAD_Y
+ TileOperand &idx_in_h = writer->declare_tile("idx_in_h", ckw::DataType::Int32);
+ writer->op_binary_expression(idx_in_h, idx_out_h, BinaryOp::Mul, stride_y_tile);
+ writer->op_binary_expression(idx_in_h, idx_in_h, BinaryOp::Sub, pad_y_tile);
+
+ TileOperand &minus_idx_in_w = writer->declare_tile("minus_idx_in_w", ckw::DataType::Int32);
+ TileOperand &minus_idx_in_h = writer->declare_tile("minus_idx_in_h", ckw::DataType::Int32);
+
+ writer->op_unary_expression(minus_idx_in_w, UnaryOp::Negate, idx_in_w);
+ writer->op_unary_expression(minus_idx_in_h, UnaryOp::Negate, idx_in_h);
+
+ // Pooling starting/ending offsets for X dim
+ TileOperand &pool_x_s = writer->declare_tile("pool_x_s", ckw::DataType::Int32);
+ TileOperand &pool_x_e = writer->declare_tile("pool_x_e", ckw::DataType::Int32);
+
+ writer->op_binary_elementwise_function(pool_x_s, BinaryFunction::Max, const_0, minus_idx_in_w);
+ writer->op_binary_expression(pool_x_e, src_width_tile, BinaryOp::Add, minus_idx_in_w);
+ writer->op_binary_elementwise_function(pool_x_e, BinaryFunction::Min, pool_size_x_tile, pool_x_e);
+
+ // Pooling starting/ending offsets for Y dim
+ TileOperand &pool_y_s = writer->declare_tile("pool_y_s", ckw::DataType::Int32);
+ TileOperand &pool_y_e = writer->declare_tile("pool_y_e", ckw::DataType::Int32);
+
+ writer->op_binary_elementwise_function(pool_y_s, BinaryFunction::Max, const_0, minus_idx_in_h);
+ writer->op_binary_expression(pool_y_e, src_height_tile, BinaryOp::Add, minus_idx_in_h);
+ writer->op_binary_elementwise_function(pool_y_e, BinaryFunction::Min, pool_size_y_tile, pool_y_e);
+
+ const TileOperand &filter_size = writer->declare_tile("filter_size", ckw::DataType::Int32);
+ if(_attributes.exclude_padding())
+ {
+ const TileOperand &y_diff = writer->declare_tile("y_diff", ckw::DataType::Int32);
+ const TileOperand &x_diff = writer->declare_tile("x_diff", ckw::DataType::Int32);
+
+ writer->op_binary_expression(y_diff, pool_y_e, BinaryOp::Sub, pool_y_s);
+ writer->op_binary_expression(x_diff, pool_x_e, BinaryOp::Sub, pool_x_s);
+
+ writer->op_binary_expression(filter_size, y_diff, BinaryOp::Mul, x_diff);
+ }
+ else
+ {
+ writer->op_binary_expression(filter_size, pool_size_x_tile, BinaryOp::Mul, pool_size_y_tile);
+ }
+
+ const TileOperand &x = writer->declare_tile("x", ckw::DataType::Int32);
+ const TileOperand &y = writer->declare_tile("y", ckw::DataType::Int32);
+
+ if(is_global_pooling)
+ {
+ writer->op_assign(x, const_0);
+ writer->op_assign(y, const_0);
+
+ writer->op_assign(pool_y_e, pool_size_y_tile);
+ writer->op_assign(pool_x_e, pool_size_x_tile);
+ }
+ else
+ {
+ writer->op_assign(x, pool_x_s);
+ writer->op_assign(y, pool_y_s);
+ }
+
+ // Y dim for-loop
+ writer->op_for_loop(y, BinaryOp::Less, pool_y_e, y, AssignmentOp::Increment, const_1, [&]()
+ {
+ // Reset the iterator for the inner loop
+ if(is_global_pooling)
+ {
+ writer->op_assign(x, const_0);
+ }
+ else
+ {
+ writer->op_assign(x, pool_x_s);
+ }
+
+ TileOperand &a_y = writer->declare_tile("a_y", ckw::DataType::Int32);
+ writer->op_binary_expression(a_y, idx_in_h, BinaryOp::Add, y);
+
+ // X dim for-loop
+ writer->op_for_loop(x, BinaryOp::Less, pool_x_e, x, AssignmentOp::Increment, const_1, [&]()
+ {
+ TileOperand &a_x = writer->declare_tile("a_x", ckw::DataType::Int32);
+ writer->op_binary_expression(a_x, idx_in_w, BinaryOp::Add, x);
+
+ TileOperand &src_tile = writer->declare_tile("src_tile", TileInfo(to_ckw(acc_data_type), m0, n0));
+
+ src_sampler.y(a_x);
+ src_sampler.z(a_y);
+
+ // Load src tile
+ if(use_fp_mixed_precision)
+ {
+ TileOperand &src_uncasted_tile = writer->declare_tile("uncasted_src_tile", dst_tile_info);
+ writer->op_load(src_uncasted_tile, src->tensor(), src_sampler);
+ writer->op_cast_expression(src_tile, src_uncasted_tile, ckw::ConvertPolicy::None);
+ }
+ else
+ {
+ writer->op_load(src_tile, src->tensor(), src_sampler);
+ }
+
+ // Take the square of the input, for L2 Pooling
+ if(_attributes.pool_type() == PoolingType::L2)
+ {
+ writer->op_binary_expression(src_tile, src_tile, BinaryOp::Mul, src_tile);
+ }
+
+ // Perform the pooling op
+ if(_attributes.pool_type() == PoolingType::MAX)
+ {
+ writer->op_binary_elementwise_function(res_tile, BinaryFunction::Max, res_tile, src_tile);
+ }
+ else
+ {
+ writer->op_binary_expression(res_tile, res_tile, BinaryOp::Add, src_tile);
+ }
+ });
+ });
+
+ if((_attributes.pool_type() == PoolingType::AVG) || (_attributes.pool_type() == PoolingType::L2))
+ {
+ // filter_size is automatically broadcast in the operation
+ writer->op_binary_expression(res_tile, res_tile, BinaryOp::Div, filter_size);
+ }
+
+ // Take square root of the result in L2 pooling
+ if(_attributes.pool_type() == PoolingType::L2)
+ {
+ writer->op_unary_elementwise_function(res_tile, UnaryFunction::Sqrt, res_tile);
+ }
+
+ // Store the results and do casting if FP_MIXED_PRECISION
+ if(use_fp_mixed_precision)
+ {
+ writer->op_cast_expression(dst_tile, res_tile, ckw::ConvertPolicy::None);
+ }
+ else
+ {
+ writer->op_assign(dst_tile, res_tile);
+ }
+}
+
+Window GpuCkwPool2d::get_window() const
+{
+ ARM_COMPUTE_ERROR_ON_MSG(_dst->tensor_shape().total_size() == 0U, "Destination tensor is not initialized");
+
+ TensorShape output_shape = _dst->tensor_shape();
+ const unsigned int vec_size = adjust_vec_size(((_dst->data_type() == DataType::F32) ? 2 : 4), _dst->dimension(0));
+ // Create and configure kernel window
+ auto win = calculate_max_window(output_shape, Steps(vec_size));
+ win = win.collapse_if_possible(win, Window::DimZ); // collapse window on batch size.
+ return win;
+}
+
+std::string GpuCkwPool2d::get_name(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+
+ return "pool2dMxN";
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
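For readers tracing the index arithmetic in write_component_code() above, the following host-side C++ sketch restates the per-output-element pooling window that the emitted kernel computes. Variable names mirror the tiles declared above; the snippet is illustrative only and is not part of this change:

    #include <algorithm>

    // Sketch: pooling window and divisor for one output element (idx_out_w, idx_out_h).
    struct PoolWindow { int x_s, x_e, y_s, y_e, filter_size; };

    PoolWindow compute_pool_window(int idx_out_w, int idx_out_h,
                                   int stride_x, int stride_y, int pad_x, int pad_y,
                                   int pool_size_x, int pool_size_y,
                                   int src_width, int src_height, bool exclude_padding)
    {
        const int idx_in_w = idx_out_w * stride_x - pad_x; // idx_in_w = idx_out_w * STRIDE_X - PAD_X
        const int idx_in_h = idx_out_h * stride_y - pad_y; // idx_in_h = idx_out_h * STRIDE_Y - PAD_Y

        PoolWindow w{};
        // Clamp the window to the valid input region (pool_x_s/pool_x_e, pool_y_s/pool_y_e above).
        w.x_s = std::max(0, -idx_in_w);
        w.x_e = std::min(pool_size_x, src_width - idx_in_w);
        w.y_s = std::max(0, -idx_in_h);
        w.y_e = std::min(pool_size_y, src_height - idx_in_h);
        // (The global-pooling fast path above instead uses the full window starting at 0.)

        // With exclude_padding(), AVG/L2 divide by the clamped window area,
        // otherwise by the full pool size.
        w.filter_size = exclude_padding ? (w.y_e - w.y_s) * (w.x_e - w.x_s)
                                        : pool_size_x * pool_size_y;
        return w;
    }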
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.h
new file mode 100644
index 0000000000..2ccf255236
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_GPUCKWPOOL2D_H
+#define ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_GPUCKWPOOL2D_H
+
+#include "src/core/common/Macros.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/IGpuCkwComponentDriver.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.h"
+
+#include <string>
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+class GpuCkwPool2d : public IGpuCkwComponentDriver
+{
+public:
+ using Attributes = ClComponentPool2d::Attributes;
+ using Settings = ClComponentPool2d::Settings;
+
+ /** Constructor
+ *
+ * For supported configurations please refer to @ref ClComponentPool2d::validate()
+ *
+ * @param[in] id Component id
+ * @param[in] tensors Tensor arguments to the component
+ * @param[in] attributes Component attributes
+ * @param[in] settings Component settings
+ */
+ GpuCkwPool2d(ComponentId id,
+ const ArgumentPack<ITensorInfo> &tensors,
+ const Attributes &attributes,
+ const Settings &settings);
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(GpuCkwPool2d);
+ /** Destructor */
+ ~GpuCkwPool2d() override = default;
+ // Inherited methods overridden:
+ virtual void write_component_code(const ComponentGroup &comp_group, GpuCkwVariableTable &vtable, GpuCkwScopedKernelWriter writer) const override;
+ Window get_window() const override;
+ std::string get_name(const ComponentGroup &comp_group) const override;
+
+private:
+ const ITensorInfo *_src;
+ const ITensorInfo *_dst;
+ Attributes _attributes;
+ Settings _settings;
+};
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_GPUCKWPOOL2D_H
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.cpp b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.cpp
index 2b01803224..d415769094 100644
--- a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.cpp
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.cpp
@@ -28,6 +28,7 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
#include "src/core/CL/CLValidate.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwPool2d.h"
#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplatePool2d.h"
#include "src/dynamic_fusion/utils/Utils.h"
#include <memory>
@@ -89,16 +90,33 @@ ClComponentPool2d::ClComponentPool2d(
const Attributes &attributes,
const Settings &settings)
: IGpuKernelComponent{ id, properties, tensors },
- _component_writer{ std::make_unique<ClTemplatePool2d>(id, tensors, attributes, settings) }
+#ifndef ACL_INTERNAL_TEST_CKW_IN_DF
+ _component_writer
+{
+ std::make_unique<ClTemplatePool2d>(id, tensors, attributes, settings)
+}
+#else //ACL_INTERNAL_TEST_CKW_IN_DF
+ _component_writer
+{
+ std::make_unique<GpuCkwPool2d>(id, tensors, attributes, settings)
+}
+#endif //ACL_INTERNAL_TEST_CKW_IN_DF
{
}
ClComponentPool2d::~ClComponentPool2d()
{
}
+#ifndef ACL_INTERNAL_TEST_CKW_IN_DF
const IGpuTemplateComponentWriter *ClComponentPool2d::template_writer() const
{
return _component_writer.get();
}
+#else //ACL_INTERNAL_TEST_CKW_IN_DF
+const IGpuCkwComponentDriver *ClComponentPool2d::ckw_component_driver() const
+{
+ return _component_writer.get();
+}
+#endif //ACL_INTERNAL_TEST_CKW_IN_DF
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.h b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.h
index 896048e27a..6814bf9243 100644
--- a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.h
+++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTPOOL2D
-#define SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTPOOL2D
+#ifndef ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTPOOL2D_H
+#define ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTPOOL2D_H
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h"
#include "src/dynamic_fusion/sketch/gpu/components/IGpuKernelComponent.h"
@@ -41,7 +41,11 @@ class ArgumentPack;
class Pool2dAttributes;
/** Forward declaration */
+#ifndef ACL_INTERNAL_TEST_CKW_IN_DF
class ClTemplatePool2d;
+#else // ACL_INTERNAL_TEST_CKW_IN_DF
+class GpuCkwPool2d;
+#endif // ACL_INTERNAL_TEST_CKW_IN_DF
class ClComponentPool2d final : public IGpuKernelComponent
{
@@ -113,20 +117,28 @@ public:
/** Allow instances of this class to be moved */
ClComponentPool2d &operator=(ClComponentPool2d &&component) = default;
-
+#ifndef ACL_INTERNAL_TEST_CKW_IN_DF
/** Get template writer for the component */
const IGpuTemplateComponentWriter *template_writer() const override;
+#else // ACL_INTERNAL_TEST_CKW_IN_DF
+ /** Get GPU kernel writer for the component */
+ const IGpuCkwComponentDriver *ckw_component_driver() const override;
+#endif // ACL_INTERNAL_TEST_CKW_IN_DF
/** Get component type */
GpuComponentType type() const override
{
- return GpuComponentType::Unfusable;
+ return GpuComponentType::Complex;
}
private:
+#ifndef ACL_INTERNAL_TEST_CKW_IN_DF
std::unique_ptr<ClTemplatePool2d> _component_writer;
+#else // ACL_INTERNAL_TEST_CKW_IN_DF
+ std::unique_ptr<GpuCkwPool2d> _component_writer;
+#endif // ACL_INTERNAL_TEST_CKW_IN_DF
};
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
-#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTPOOL2D */
+#endif // ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTPOOL2D_H
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp
index e464be8607..7ecfa0158b 100644
--- a/src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp
@@ -46,7 +46,16 @@ namespace dynamic_fusion
{
namespace
{
-constexpr GpuOperatorType operator_type = GpuOperatorType::Unfusable;
+void calculate_and_init_dst_if_empty(ITensorInfo *dst, const ITensorInfo *src, const Pool2dAttributes &attributes, const GpuPool2dSettings &settings)
+{
+ if(dst->total_size() == 0U)
+ {
+ auto shape = misc::shape_calculator::compute_pool_shape(*src, convert_pool_attr_to_pool_info(attributes, settings.mixed_precision()));
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(shape));
+ }
+}
+
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
} // namespace
GpuPool2dSettings &GpuPool2dSettings::mixed_precision(bool mixed_precision)
@@ -73,19 +82,16 @@ bool GpuPool2dSettings::use_inf_as_limit() const
Status GpuPool2d::validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Pool2dAttributes &attributes,
const GpuPool2dSettings &settings)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !dst->has_valid_id());
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
// Auto initialize dst tensor info
- TensorInfo dst_info_to_validate = *dst;
- {
- auto shape = misc::shape_calculator::compute_pool_shape(*src, convert_pool_attr_to_pool_info(attributes, settings.mixed_precision()));
- auto_init_if_empty(dst_info_to_validate, src->clone()->set_tensor_shape(shape));
- }
+ TensorInfo dst_info_to_validate;
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes, settings);
// Perform fusion test
// Pack tensor infos
@@ -98,16 +104,15 @@ Status GpuPool2d::validate_op(const GpuWorkloadSketch &sketch,
"Operator fusion test failed. This operator cannot be fused into the workload");
// Check if configuration is supported
- return is_supported_op(*sketch.gpu_context(), src, &dst_info_to_validate, attributes, settings);
+ return is_supported_op(*sketch.gpu_context(), src, attributes, settings);
}
Status GpuPool2d::is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Pool2dAttributes &attributes,
const GpuPool2dSettings &settings)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
// Data type
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
// Data layout
@@ -116,11 +121,9 @@ Status GpuPool2d::is_supported_op(const GpuWorkloadContext &context,
ARM_COMPUTE_RETURN_ERROR_ON_MSG(!attributes.exclude_padding(), "Exclude padding must be set to true in Attributes!");
// Auto initialize dst tensor info
- TensorInfo dst_info_to_validate = *dst;
- {
- auto shape = misc::shape_calculator::compute_pool_shape(*src, convert_pool_attr_to_pool_info(attributes, settings.mixed_precision()));
- auto_init_if_empty(dst_info_to_validate, src->clone()->set_tensor_shape(shape));
- }
+ TensorInfo dst_info_to_validate;
+
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, attributes, settings);
// Check components
if(context.gpu_language() == GpuLanguage::OpenCL)
@@ -145,21 +148,20 @@ Status GpuPool2d::is_supported_op(const GpuWorkloadContext &context,
return Status{};
}
-void GpuPool2d::create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *dst,
- const Pool2dAttributes &attributes,
- const GpuPool2dSettings &settings)
+ITensorInfo *GpuPool2d::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ const Pool2dAttributes &attributes,
+ const GpuPool2dSettings &settings)
{
// Assert validation
- ARM_COMPUTE_ERROR_THROW_ON(GpuPool2d::validate_op(sketch, src, dst, attributes, settings));
- ARM_COMPUTE_LOG_PARAMS(src, dst, attributes, settings);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuPool2d::validate_op(sketch, src, attributes, settings));
+ ARM_COMPUTE_LOG_PARAMS(src, attributes, settings);
+
+ ITensorInfo *dst = sketch.implementation().create_virtual_tensor();
+ ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
// Auto initialize dst tensor
- {
- auto shape = misc::shape_calculator::compute_pool_shape(*src, convert_pool_attr_to_pool_info(attributes, settings.mixed_precision())); // use the default DimensionRoundingType
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(shape));
- }
+ calculate_and_init_dst_if_empty(dst, src, attributes, settings);
// Translate into components and add to component graph
auto &comp_graph = sketch.implementation().component_graph();
@@ -198,6 +200,8 @@ void GpuPool2d::create_op(GpuWorkloadSketch &sketch,
const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
}
} // namespace dynamic_fusion
diff --git a/tests/validation/dynamic_fusion/gpu/cl/Pool2d.cpp b/tests/validation/dynamic_fusion/gpu/cl/Pool2d.cpp
index 7f5efd662a..b13364ccf1 100644
--- a/tests/validation/dynamic_fusion/gpu/cl/Pool2d.cpp
+++ b/tests/validation/dynamic_fusion/gpu/cl/Pool2d.cpp
@@ -21,8 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
-#ifndef ACL_INTERNAL_TEST_CKW_IN_DF // Do not include this test if ACL_INTERNAL_TEST_CKW_IN_DF and the op has not been ported to ckw
+#ifdef ACL_INTERNAL_TEST_CKW_IN_DF
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h"
#include "tests/CL/CLAccessor.h"
@@ -111,7 +110,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// Validate Pool2d Configuration
auto src_info = context.create_tensor_info(input_info);
auto dst_info = context.create_tensor_info(output_info);
- bool res = bool(GpuPool2d::validate_op(sketch, &src_info, &dst_info, pool2d_attr, settings));
+ bool res = bool(GpuPool2d::validate_op(sketch, &src_info, pool2d_attr, settings));
ARM_COMPUTE_EXPECT(res == expected, framework::LogLevel::ERRORS);
}
@@ -232,5 +231,4 @@ TEST_SUITE_END() // CL
}
}
}
-
#endif // ACL_INTERNAL_TEST_CKW_IN_DF
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h
index 0efb761967..34f2647741 100644
--- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE
-#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
@@ -33,6 +33,7 @@
#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "src/dynamic_fusion/utils/Utils.h"
#include "tests/CL/CLAccessor.h"
@@ -100,7 +101,8 @@ protected:
// Create Pool2dSettings
GpuPool2dSettings pool_settings = GpuPool2dSettings().mixed_precision(mixed_precision);
- FunctionType::create_op(sketch, &input_info, &dst_info, pool_attr, pool_settings);
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, &input_info, pool_attr, pool_settings);
+ GpuOutput::create_op(sketch, ans_info, &dst_info);
// Configure runtime
ClWorkloadRuntime runtime;
@@ -184,4 +186,4 @@ public:
} // namespace test
} // namespace arm_compute
-#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H