Diffstat (limited to 'src/gpu')
-rw-r--r--  src/gpu/cl/ClKernelLibrary.cpp            8
-rw-r--r--  src/gpu/cl/kernels/ClScatterKernel.cpp  204
-rw-r--r--  src/gpu/cl/kernels/ClScatterKernel.h     14
-rw-r--r--  src/gpu/cl/operators/ClScatter.cpp       57
-rw-r--r--  src/gpu/cl/operators/ClScatter.h          8
5 files changed, 254 insertions, 37 deletions
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index 4544a66e39..c4117b8a1a 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2023 Arm Limited.
+ * Copyright (c) 2016-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -441,6 +441,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{"reorg_layer_nhwc", "nhwc/reorg_layer.cl"},
{"scale_nearest_neighbour_nhwc", "nhwc/scale.cl"},
{"scale_bilinear_nhwc", "nhwc/scale.cl"},
+ {"scatter_mp1d_2d_mpnd", "common/scatter.cl"},
+ {"scatter1D", "common/scatter.cl"},
{"space_to_batch_nhwc", "nhwc/space_to_batch.cl"},
{"space_to_batch_static_nhwc", "nhwc/space_to_batch.cl"},
{"space_to_depth_nhwc", "nhwc/space_to_depth.cl"},
@@ -591,6 +593,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/common/gather.clembed"
},
{
+ "common/scatter.cl",
+#include "./cl_kernels/common/scatter.clembed"
+ },
+ {
"common/gemm.cl",
#include "./cl_kernels/common/gemm.clembed"
},
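
Note: the two hunks above wire the new scatter kernels into the kernel library. _kernel_program_map maps a kernel name ("scatter_mp1d_2d_mpnd", "scatter1D") to its OpenCL source file, and _program_source_map maps that file to the source embedded at build time through the .clembed include. Below is a minimal sketch of the resulting two-step lookup; the helper name is illustrative and this is not the actual ClKernelLibrary code:

    #include <map>
    #include <stdexcept>
    #include <string>

    // Resolve a kernel name to its embedded OpenCL source, mirroring the two
    // maps registered above (sketch only; names are illustrative).
    std::string program_source_for(const std::string                        &kernel_name,
                                   const std::map<std::string, std::string> &kernel_program_map,
                                   const std::map<std::string, std::string> &program_source_map)
    {
        // Step 1: "scatter_mp1d_2d_mpnd" -> "common/scatter.cl"
        const auto program_it = kernel_program_map.find(kernel_name);
        if (program_it == kernel_program_map.end())
        {
            throw std::runtime_error("Kernel not registered: " + kernel_name);
        }
        // Step 2: "common/scatter.cl" -> contents of scatter.clembed
        const auto source_it = program_source_map.find(program_it->second);
        if (source_it == program_source_map.end())
        {
            throw std::runtime_error("No embedded source for: " + program_it->second);
        }
        return source_it->second;
    }
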
diff --git a/src/gpu/cl/kernels/ClScatterKernel.cpp b/src/gpu/cl/kernels/ClScatterKernel.cpp
index 720164366e..19adc1ef34 100644
--- a/src/gpu/cl/kernels/ClScatterKernel.cpp
+++ b/src/gpu/cl/kernels/ClScatterKernel.cpp
@@ -26,6 +26,15 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/DataTypeUtils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "support/Cast.h"
+
+#include <cstdint>
namespace arm_compute
{
@@ -33,44 +42,207 @@ namespace opencl
{
namespace kernels
{
+
+namespace
+{
+constexpr int max_index_length = 5;
+} // namespace
+
ClScatterKernel::ClScatterKernel()
{
}
-Status ClScatterKernel::validate(const ITensorInfo *src,
- const ITensorInfo *updates,
+Status ClScatterKernel::validate(const ITensorInfo *updates,
const ITensorInfo *indices,
const ITensorInfo *dst,
const ScatterInfo &info)
{
- ARM_COMPUTE_UNUSED(src);
- ARM_COMPUTE_UNUSED(updates);
- ARM_COMPUTE_UNUSED(indices);
- ARM_COMPUTE_UNUSED(dst);
ARM_COMPUTE_UNUSED(info);
+ const TensorShape &ind_shape = indices->tensor_shape();
+ const TensorShape &upt_shape = updates->tensor_shape();
+ const TensorShape &dst_shape = dst->tensor_shape();
+
+ const int32_t upt_dims = upt_shape.num_dimensions();
+ const int32_t dst_dims = dst_shape.num_dimensions();
+ const int32_t ind_dims = ind_shape.num_dimensions();
+ const int32_t data_dim = upt_dims - (ind_dims - 1); // Number of batch dims is the number of indices dims - 1
+
+ const int32_t index_len = ind_shape[0];
+ bool unsupported_padding_config =
+ (dst_dims == index_len) && index_len > 1 && (dst->has_padding() || updates->has_padding());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(unsupported_padding_config, "Padding is not supported with these shapes.");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(updates, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(indices, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32, DataType::F16, DataType::S32, DataType::S16,
+ DataType::S8, DataType::U32, DataType::U16, DataType::U8);
+
+ // Check data dims in update tensor and output tensor are equal
+ for (int32_t i = 0; i < data_dim; i++)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(upt_shape[i] != dst_shape[i],
+ "Data dims should be same size in both updates and ouput tensor.");
+ }
+
+ // Check if batch dims in indices and updates tensor are equal.
+ for (int32_t i = 0; i < ind_dims - 1; i++)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(upt_shape[data_dim + i] != ind_shape[i + 1],
+ "Batch dimensions should be the same in updates and indices tensor.");
+ }
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(ind_shape[1] != upt_shape[data_dim],
+ "Height of indices tensor should match size of highest dimension in updates tensor "
+ "(Excluding batch dimension)");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ data_dim >= dst_dims, "Update tensor cannot have more dims than output tensor. (Excluding batch dimensions)");
+ ARM_COMPUTE_RETURN_ERROR_ON(index_len != dst_dims - data_dim);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((ind_dims < 2), "Shape of Indices tensor must be at least 2D");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(index_len > max_index_length, "Maximum supported index length is 5!");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(index_len > dst_dims && dst_dims != 1,
+ "Index length should be smaller than or equal to number of output dims");
+
return Status{};
}
+
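
To make the constraints in validate() concrete, here is a hand-worked set of shapes that passes every check. This is a sketch, not part of the patch; it assumes the usual TensorInfo/TensorShape constructors, and the ScatterInfo constructor and header location shown are assumptions:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/function_info/ScatterInfo.h" // assumed location
    #include "src/gpu/cl/kernels/ClScatterKernel.h"

    using namespace arm_compute;

    // Shapes are listed fastest-changing dimension first, ACL style.
    // dst is 3D and index_len is 2, so data_dim = upt_dims - (ind_dims - 1) = 1:
    // each of the 5 indices is a 2D coordinate into the (4, 3) part of dst and
    // selects one 6-element row to update.
    const TensorInfo updates(TensorShape(6U, 5U), 1, DataType::F32); // (data, num_indices)
    const TensorInfo indices(TensorShape(2U, 5U), 1, DataType::S32); // (index_len, num_indices)
    const TensorInfo dst(TensorShape(6U, 4U, 3U), 1, DataType::F32);

    // Passes: upt_shape[0] == dst_shape[0] (6), ind_shape[1] == upt_shape[data_dim] (5),
    // index_len == dst_dims - data_dim (2 == 3 - 1), ind_dims >= 2, index_len <= 5.
    const Status s = opencl::kernels::ClScatterKernel::validate(
        &updates, &indices, &dst, ScatterInfo(ScatterFunction::Add, false));
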
void ClScatterKernel::configure(const ClCompileContext &compile_context,
- const ITensorInfo *src,
const ITensorInfo *updates,
const ITensorInfo *indices,
ITensorInfo *dst,
const ScatterInfo &info)
{
- ARM_COMPUTE_UNUSED(compile_context);
- ARM_COMPUTE_UNUSED(src);
- ARM_COMPUTE_UNUSED(updates);
- ARM_COMPUTE_UNUSED(indices);
- ARM_COMPUTE_UNUSED(dst);
- ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(updates, dst, indices);
+ ARM_COMPUTE_LOG_PARAMS(updates, indices, dst, info);
+
+ const TensorShape &dst_shape = dst->tensor_shape();
+ const int index_len = indices->dimension(0);
+
+ // Check for single element data block
+ const bool is_scalar_block = (dst->num_dimensions() == static_cast<uint32_t>(index_len));
+
+ const int n0 = adjust_vec_size(16 / updates->element_size(), is_scalar_block ? 1 : updates->dimension(0));
+ const int partial_n0 = updates->dimension(0) % n0;
+
+ // The GWS will be 2D [x, y]
+ // x-dimension refers to the x coordinate of the dst tensor
+ // y-dimension refers to the collapsed y-coordinate of the data part of the dst tensor
+ Window win;
+
+ if (!is_scalar_block)
+ {
+ win = calculate_max_window(dst_shape, Steps(n0));
+
+ // Collapse the dimensions corresponding to indices in the execution window
+ for (int i = 0; i < index_len; ++i)
+ {
+ win.set(dst->num_dimensions() - (i + 1), Window::Dimension(0, 1, 1));
+ }
+
+ win = win.collapse(win, 1);
+ }
+
+ // Set build options
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
+ build_opts.add_option_if(is_data_type_float(dst->data_type()), "-DIS_FLOAT");
+
+ const int num_dims = dst->num_dimensions();
+ TensorShape ind_collapsed = indices->tensor_shape().collapsed_from(1);
+ build_opts.add_option("-DNUM_INDICES=" + support::cpp11::to_string(ind_collapsed[1]));
+ build_opts.add_option("-DINDEX_LENGTH=" + support::cpp11::to_string(index_len));
+
+ // We provide 5 variables to use in a constant array
+ for (int i = 1; i <= max_index_length; i++)
+ {
+ build_opts.add_option("-DOUT_SHAPE_N_MINUS_" + support::cpp11::to_string(i) + "=" +
+ support::cpp11::to_string(dst_shape[std::max(num_dims - i, 0)]));
+ }
+
+ build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
+ build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_n0));
+
+ switch (info.func)
+ {
+ case ScatterFunction::Update:
+ build_opts.add_option("-DSCATTER_FUNCTION=UPDATE_OP");
+ build_opts.add_option("-DSKIP_OUTPUT_READ");
+ break;
+ case ScatterFunction::Add:
+ build_opts.add_option("-DSCATTER_FUNCTION=ADD_OP");
+ break;
+ case ScatterFunction::Sub:
+ build_opts.add_option("-DSCATTER_FUNCTION=SUB_OP");
+ break;
+ case ScatterFunction::Max:
+ build_opts.add_option("-DSCATTER_FUNCTION=MAX_OP");
+ break;
+ case ScatterFunction::Min:
+ build_opts.add_option("-DSCATTER_FUNCTION=MIN_OP");
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
+ // Create kernel
+ std::string kernel_name = "scatter_mp1d_2d_mpnd";
+ build_opts.add_option("-D" + upper_string(kernel_name));
+
+ ICLKernel::configure_internal(win);
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+
+ // Set config_id for enabling LWS tuning
+ _config_id = kernel_name;
+ _config_id += "_";
+ _config_id += lower_string(string_from_data_type(updates->data_type()));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(dst->dimension(1));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(dst->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(dst->dimension(2));
+ _config_id += "_";
}
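
For the example shapes above with F32 data and ScatterFunction::Add, the options assembled by configure() can be worked out by hand: n0 = adjust_vec_size(16 / 4, 6) = 4 and partial_n0 = 6 % 4 = 2, giving roughly the following (a hand-derived sketch, not captured output):

    -DDATA_TYPE=float -DIS_FLOAT
    -DNUM_INDICES=5 -DINDEX_LENGTH=2
    -DOUT_SHAPE_N_MINUS_1=3 -DOUT_SHAPE_N_MINUS_2=4 -DOUT_SHAPE_N_MINUS_3=6
    -DOUT_SHAPE_N_MINUS_4=6 -DOUT_SHAPE_N_MINUS_5=6
    -DN0=4 -DPARTIAL_N0=2
    -DSCATTER_FUNCTION=ADD_OP
    -DSCATTER_MP1D_2D_MPND

OUT_SHAPE_N_MINUS_i reads dst_shape[max(num_dims - i, 0)], so positions beyond the tensor rank clamp to dimension 0 (here 6). ScatterFunction::Update would additionally define SKIP_OUTPUT_READ, since an update never reads the destination value.
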
void ClScatterKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
- ARM_COMPUTE_UNUSED(tensors);
- ARM_COMPUTE_UNUSED(window);
- ARM_COMPUTE_UNUSED(queue);
+ const auto updates =
+ utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+ const auto indices =
+ utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+ const ITensorInfo *dst_info = dst->info();
+ const ITensorInfo *upd_info = updates->info();
+ const int num_dims = dst_info->num_dimensions();
+ const int ind_dims = indices->info()->num_dimensions();
+ const int index_len = indices->info()->dimension(0);
+
+ bool unsupported_padding_config =
+ num_dims == index_len && index_len > 1 && (dst_info->has_padding() || upd_info->has_padding());
+ if (unsupported_padding_config)
+ {
+ ARM_COMPUTE_ERROR("Unsupported Configuration! Padding not supported with these shapes.");
+ }
+
+ // Calculate m-dimensional data block strides in the updates and destination tensors.
+ const int upt_block_stride =
+ updates->info()->strides_in_bytes()[updates->info()->num_dimensions() - (ind_dims - 1)];
+
+ const int out_block_stride = dst_info->strides_in_bytes()[num_dims - index_len];
+
+ unsigned int idx = 0;
+
+ add_2D_tensor_argument(idx, updates, window);
+ add_2D_tensor_argument(idx, indices, window);
+ add_2D_tensor_argument(idx, dst, window);
+
+ _kernel.setArg<cl_int>(idx++, upt_block_stride);
+ _kernel.setArg<cl_int>(idx++, out_block_stride);
+
+ enqueue(queue, *this, window, lws_hint());
}
} // namespace kernels
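
The two scalar kernel arguments set in run_op() are the byte strides of one m-dimensional data block in the updates and destination tensors. Continuing the example shapes (hand-derived; F32 means 4 bytes per element):

    // updates (6, 5): strides_in_bytes = [4, 24];
    // upt_block_stride = strides[upt_dims - (ind_dims - 1)] = strides[1] = 24
    // (one 6-element row per index).
    //
    // dst (6, 4, 3): strides_in_bytes = [4, 24, 96];
    // out_block_stride = strides[num_dims - index_len] = strides[3 - 2] = 24.
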
diff --git a/src/gpu/cl/kernels/ClScatterKernel.h b/src/gpu/cl/kernels/ClScatterKernel.h
index dda614ff3e..e1b469c88e 100644
--- a/src/gpu/cl/kernels/ClScatterKernel.h
+++ b/src/gpu/cl/kernels/ClScatterKernel.h
@@ -37,6 +37,7 @@ namespace opencl
{
namespace kernels
{
+
class ClScatterKernel : public IClKernel
{
public:
@@ -44,15 +45,15 @@ public:
ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClScatterKernel);
/** Initialise the kernel's input and output.
*
+ * @note Negative indices are treated as out of bounds.
+ *
* @param[in] compile_context The compile context to be used.
- * @param[in] src Input tensor info for the source matrix.
* @param[in] updates Input tensor info for the Update matrix. Data type supported: same as @p dst
- * @param[in] indices Input tensor info for the Indices matrix. Data type supported: U32.
+ * @param[in] indices Input tensor info for the Indices matrix. Data type supported: S32.
* @param[out] dst Output tensor info. Data type supported: same as @p updates
* @param[in] info Attributes for Scatter Kernel
*/
void configure(const ClCompileContext &compile_context,
- const ITensorInfo *src,
const ITensorInfo *updates,
const ITensorInfo *indices,
ITensorInfo *dst,
@@ -63,11 +64,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *src,
- const ITensorInfo *updates,
- const ITensorInfo *indices,
- const ITensorInfo *dst,
- const ScatterInfo &info);
+ static Status
+ validate(const ITensorInfo *updates, const ITensorInfo *indices, const ITensorInfo *dst, const ScatterInfo &info);
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/gpu/cl/operators/ClScatter.cpp b/src/gpu/cl/operators/ClScatter.cpp
index af5fbb86f3..a11ecd7e6a 100644
--- a/src/gpu/cl/operators/ClScatter.cpp
+++ b/src/gpu/cl/operators/ClScatter.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/common/utils/Log.h"
+#include "src/gpu/cl/kernels/ClCopyKernel.h"
#include "src/gpu/cl/kernels/ClFillKernel.h"
#include "src/gpu/cl/kernels/ClScatterKernel.h"
@@ -47,9 +48,19 @@ Status ClScatter::validate(const ITensorInfo *src,
const ScatterInfo &info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(updates, indices, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32);
+ if (src != nullptr)
+ {
+ // Check dst/src are same shape and datatype.
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(src->tensor_shape(), dst->tensor_shape());
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, updates, dst);
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClCopyKernel::validate(src, dst)); // Validate Copy kernel
+ }
+ if (src != dst)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClFillKernel::validate(dst, PixelValue(0.0f))); // Validate Fill kernel.
+ }
- return kernels::ClScatterKernel::validate(src, updates, indices, dst, info);
+ return kernels::ClScatterKernel::validate(updates, indices, dst, info);
}
void ClScatter::configure(const CLCompileContext &compile_context,
@@ -61,11 +72,6 @@ void ClScatter::configure(const CLCompileContext &compile_context,
{
ARM_COMPUTE_ERROR_ON_NULLPTR(updates, indices, dst);
ARM_COMPUTE_LOG_PARAMS(src, indices, dst, info);
- ARM_COMPUTE_UNUSED(src);
- ARM_COMPUTE_UNUSED(updates);
- ARM_COMPUTE_UNUSED(indices);
- ARM_COMPUTE_UNUSED(dst);
- ARM_COMPUTE_UNUSED(info);
// Perform validation step
ARM_COMPUTE_ERROR_THROW_ON(validate(src, updates, indices, dst, info));
@@ -74,19 +80,50 @@ void ClScatter::configure(const CLCompileContext &compile_context,
// If necessary, create fill kernel to fill dst tensor.
if (_fill_zero)
{
- _fill_kernel = std::make_unique<kernels::ClFillKernel>();
+ auto f = std::make_unique<kernels::ClFillKernel>();
+ f->configure(compile_context, dst, PixelValue(0.0f));
+ _fill_kernel = std::move(f);
+ }
+ else if (src != dst) // Check whether copying is necessary
+ {
+ // Fill dst with src copy here.
+ auto j = std::make_unique<kernels::ClCopyKernel>();
+ j->configure(compile_context, src, dst);
+ _copy_kernel = std::move(j);
+ _run_copy = true;
}
// Configure ClScatterKernel
auto k = std::make_unique<kernels::ClScatterKernel>();
k->set_target(CLScheduler::get().target());
- k->configure(compile_context, src, updates, indices, dst, info);
+ k->configure(compile_context, updates, indices, dst, info);
_scatter_kernel = std::move(k);
}
void ClScatter::run(ITensorPack &tensors)
{
- ARM_COMPUTE_UNUSED(tensors);
+ // Get tensors.
+ auto src = tensors.get_const_tensor(ACL_SRC_0);
+ auto updates = tensors.get_const_tensor(ACL_SRC_1);
+ auto indices = tensors.get_const_tensor(ACL_SRC_2);
+ auto dst = tensors.get_tensor(ACL_DST);
+
+ if (_fill_zero)
+ {
+ // Fill destination tensor with 0 values if zero init.
+ ITensorPack fill_pack{{ACL_SRC, dst}};
+ CLScheduler::get().enqueue_op(*_fill_kernel, fill_pack, false);
+ }
+
+ if (_run_copy)
+ {
+ // Copy src into dst before the scatter op.
+ ITensorPack copy_pack{{ACL_SRC, src}, {ACL_DST, dst}};
+ CLScheduler::get().enqueue_op(*_copy_kernel, copy_pack, false);
+ }
+
+ ITensorPack scatter_pack{{ACL_SRC_0, updates}, {ACL_SRC_1, indices}, {ACL_DST, dst}};
+ CLScheduler::get().enqueue_op(*_scatter_kernel, scatter_pack, false);
}
} // namespace opencl
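
An end-to-end sketch of the operator path above; the CLTensor allocation flow, the ScatterInfo constructor, and the assumption that a CLScheduler has already been initialised come from general ACL usage, not from this patch:

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    #include "src/gpu/cl/operators/ClScatter.h"

    using namespace arm_compute;

    void scatter_add_example(const CLCompileContext &ctx)
    {
        CLTensor src, updates, indices, dst;
        src.allocator()->init(TensorInfo(TensorShape(6U, 4U, 3U), 1, DataType::F32));
        updates.allocator()->init(TensorInfo(TensorShape(6U, 5U), 1, DataType::F32));
        indices.allocator()->init(TensorInfo(TensorShape(2U, 5U), 1, DataType::S32)); // S32 only
        dst.allocator()->init(TensorInfo(TensorShape(6U, 4U, 3U), 1, DataType::F32));

        // Configure against tensor infos; src != dst and no zero init, so run()
        // will enqueue the copy kernel before the scatter kernel.
        opencl::ClScatter scatter;
        scatter.configure(ctx, src.info(), updates.info(), indices.info(), dst.info(),
                          ScatterInfo(ScatterFunction::Add, /*zero_init=*/false));

        src.allocator()->allocate();
        updates.allocator()->allocate();
        indices.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src, updates and indices here ...

        ITensorPack pack{{ACL_SRC_0, &src}, {ACL_SRC_1, &updates}, {ACL_SRC_2, &indices}, {ACL_DST, &dst}};
        scatter.run(pack);
    }
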
diff --git a/src/gpu/cl/operators/ClScatter.h b/src/gpu/cl/operators/ClScatter.h
index 433f7ca3a4..a1b32fed45 100644
--- a/src/gpu/cl/operators/ClScatter.h
+++ b/src/gpu/cl/operators/ClScatter.h
@@ -39,6 +39,7 @@ namespace opencl
// Forward declaration
class ClFillKernel;
class ClScatterKernel;
+class ClCopyKernel;
/** Basic operator to execute Scatter on OpenCL. This operator calls the following OpenCL kernels:
*
@@ -56,13 +57,14 @@ public:
* Valid data layouts:
* - All
*
- * @note indices must always be U32
+ * @note indices must always be S32.
+ * @note Negative indices are treated as out of bounds.
* @note src, updates and dst tensors must be same datatype.
*
* @param[in] compile_context The compile context to be used.
* @param[in] src Source input tensor info. Can be nullptr when using "Add" Scatter Function with zero initialization.
* @param[in] updates Tensor info for tensor storing update values to use for scatter function. Data types supported: same as @p src.
- * @param[in] indices Tensor info for tensor storing indices to use for scatter function. Data types supported: U32 only.
+ * @param[in] indices Tensor info for tensor storing indices to use for scatter function. Data types supported: S32 only.
* @param[out] dst Output tensor to store the result of the Scatter Function. Data types supported: same as @p src and @p updates.
* @param[in] Scatter_info Contains Scatter operation information described in @ref ScatterInfo.
*/
@@ -89,7 +91,9 @@ public:
private:
std::unique_ptr<opencl::IClKernel> _scatter_kernel{nullptr};
std::unique_ptr<opencl::IClKernel> _fill_kernel{nullptr};
+ std::unique_ptr<opencl::IClKernel> _copy_kernel{nullptr};
bool _fill_zero{false};
+ bool _run_copy{false};
};
} // namespace opencl
} // namespace arm_compute