author    Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>  2024-04-29 22:53:58 +0100
committer Suhail M <MohammedSuhail.Munshi@arm.com>  2024-05-08 12:07:31 +0000
commit    2fea13593a4753316ae488edf489cb4b00150153
tree      423e6369a74c44b505dd8fd4d62bde0946ec2e32
parent    c22e1263ba3a6945ceb1fdccb33eac512fd156fb
Add batched indices support to Scatter GPU Implementation
Resolves: [COMPMID-6897]

Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Change-Id: I70b1c3c5f0de8484fcb6c3b0cc0d0d8c059b0f58
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11525
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
 src/gpu/cl/kernels/ClScatterKernel.cpp          | 44
 tests/datasets/ScatterDataset.h                 | 19
 tests/validation/CL/ScatterLayer.cpp            |  4
 tests/validation/fixtures/ScatterLayerFixture.h | 15
 tests/validation/reference/ScatterLayer.cpp     |  1
 5 files changed, 60 insertions(+), 23 deletions(-)
diff --git a/src/gpu/cl/kernels/ClScatterKernel.cpp b/src/gpu/cl/kernels/ClScatterKernel.cpp
index 21c0253f91..f76a674b27 100644
--- a/src/gpu/cl/kernels/ClScatterKernel.cpp
+++ b/src/gpu/cl/kernels/ClScatterKernel.cpp
@@ -66,6 +66,7 @@ Status ClScatterKernel::validate(const ITensorInfo *updates,
const int32_t upt_dims = upt_shape.num_dimensions();
const int32_t dst_dims = dst_shape.num_dimensions();
const int32_t ind_dims = ind_shape.num_dimensions();
+ const int32_t data_dim = upt_dims - (ind_dims - 1); // Number of batch dims is the number of indices dims - 1
const int32_t index_len = ind_shape[0];
@@ -73,14 +74,34 @@ Status ClScatterKernel::validate(const ITensorInfo *updates,
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(indices, DataType::S32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32, DataType::F16, DataType::S32, DataType::S16,
DataType::S8, DataType::U32, DataType::U16, DataType::U8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(ind_dims > 2, "Only 2D indices tensors are currently supported.");
+
+ // Check data dims in update tensor and output tensor are equal
+ for (int32_t i = 0; i < data_dim; i++)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(upt_shape[i] != dst_shape[i],
+ "Data dims should be same size in both updates and ouput tensor.");
+ }
+
+ // Check if batch dims in indices and updates tensor are equal.
+ for (int32_t i = 0; i < ind_dims - 1; i++)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(upt_shape[data_dim + i] != ind_shape[i + 1],
+ "Batch dimensions should be the same in updates and indices tensor.");
+ }
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(ind_shape[1] != upt_shape[data_dim],
+ "Height of indices tensor should match size of highest dimension in updates tensor "
+ "(Excluding batch dimension)");
+
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- ind_shape[1] != upt_shape[upt_dims - 1],
- "Height of indices tensor should match size of highest dimension in updates tensor.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(upt_dims > dst_dims, "Update tensor cannot have more dims than output tensor.");
data_dim >= dst_dims, "Update tensor cannot have more dims than output tensor (excluding batch dimensions).");
+ ARM_COMPUTE_RETURN_ERROR_ON(index_len != dst_dims - data_dim);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((ind_dims < 2), "Shape of Indices tensor must be at least 2D");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(index_len > max_index_length, "Maximum supported index length is 5!");
- ARM_COMPUTE_RETURN_ERROR_ON(index_len != dst_dims - upt_dims + 1);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ index_len >= dst_dims && dst_dims != 1,
+ "Index length should be smaller than number of output dims (or equal to with 1D output)");
return Status{};
}
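To see what the new checks enforce, here is a minimal standalone model of the shape relationships (plain C++ with vectors standing in for TensorShape; names are illustrative, and ACL's dim-0-innermost layout is assumed):

    // Standalone model of the batched-scatter shape checks (illustrative).
    #include <cassert>
    #include <vector>

    // Dimension 0 is the innermost dimension, as in ACL tensor shapes.
    bool shapes_are_valid(const std::vector<int> &upt, // updates shape
                          const std::vector<int> &ind, // indices shape
                          const std::vector<int> &dst) // output shape
    {
        const int upt_dims = static_cast<int>(upt.size());
        const int dst_dims = static_cast<int>(dst.size());
        const int ind_dims = static_cast<int>(ind.size());
        if (ind_dims < 2)
            return false; // indices must be at least 2D
        const int index_len = ind[0];
        // Batch dims are every indices dim past the first.
        const int data_dim = upt_dims - (ind_dims - 1);
        if (index_len != dst_dims - data_dim)
            return false; // index length must cover the scattered dims
        for (int i = 0; i < data_dim; ++i)
            if (upt[i] != dst[i])
                return false; // data dims must match: updates vs. output
        for (int i = 0; i < ind_dims - 1; ++i)
            if (upt[data_dim + i] != ind[i + 1])
                return false; // batch dims must match: updates vs. indices
        return true;
    }

    int main()
    {
        // k = 2 batched config from the dataset below:
        // updates (6, 2, 2), indices (1, 2, 2), output (6, 5).
        assert(shapes_are_valid({6, 2, 2}, {1, 2, 2}, {6, 5}));
    }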
@@ -96,7 +117,7 @@ void ClScatterKernel::configure(const ClCompileContext &compile_context,
const TensorShape &dst_shape = dst->tensor_shape();
- const bool is_scalar_block = updates->num_dimensions() == 1;
+ const bool is_scalar_block = updates->num_dimensions() == 1; // Checks for replacing only a single element.
const int n0 = adjust_vec_size(16 / updates->element_size(), is_scalar_block ? 1 : updates->dimension(0));
const int partial_n0 = updates->dimension(0) % n0;
@@ -120,9 +141,9 @@ void ClScatterKernel::configure(const ClCompileContext &compile_context,
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
build_opts.add_option_if(is_data_type_float(dst->data_type()), "-DIS_FLOAT");
- const int num_dims = dst->num_dimensions();
-
- build_opts.add_option("-DNUM_INDICES=" + support::cpp11::to_string(indices->dimension(1)));
+ const int num_dims = dst->num_dimensions();
+ TensorShape ind_collapsed = indices->tensor_shape().collapsed_from(1);
+ build_opts.add_option("-DNUM_INDICES=" + support::cpp11::to_string(ind_collapsed[1]));
build_opts.add_option("-DINDEX_LENGTH=" + support::cpp11::to_string(index_len));
// We provide 5 variables to use in a constant array
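NUM_INDICES is now taken from the indices shape with every batch dimension folded into one, so the kernel sees a single flat list of index rows. A minimal model of that collapse (not the actual TensorShape::collapsed_from implementation):

    #include <iostream>
    #include <vector>

    // Fold all dimensions from `axis` onward into a single one,
    // mirroring what collapsed_from(1) does to the indices shape.
    std::vector<int> collapsed_from(std::vector<int> shape, int axis)
    {
        int folded = 1;
        for (int i = axis; i < static_cast<int>(shape.size()); ++i)
            folded *= shape[i];
        shape.resize(axis);
        shape.push_back(folded);
        return shape;
    }

    int main()
    {
        // Batched indices (index_len 2, batch dims 2 x 2) -> (2, 4):
        // the kernel then reads NUM_INDICES = 4 from dimension 1.
        for (int d : collapsed_from({2, 2, 2}, 1))
            std::cout << d << ' '; // prints: 2 4
    }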
@@ -187,11 +208,14 @@ void ClScatterKernel::run_op(ITensorPack &tensors, const Window &window, cl::Com
const ITensorInfo *dst_info = dst->info();
const int num_dims = dst_info->num_dimensions();
+ const int ind_dims = indices->info()->num_dimensions();
const int index_len = indices->info()->dimension(0);
// calculate m-dimensional data block strides in updates and destination tensors
- const int upt_block_stride = updates->info()->strides_in_bytes()[updates->info()->num_dimensions() - 1];
+ const int upt_block_stride =
+ updates->info()->strides_in_bytes()[updates->info()->num_dimensions() - (ind_dims - 1)];
+
const int out_block_stride = dst_info->strides_in_bytes()[num_dims - index_len];
unsigned int idx = 0;
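With batched indices, the m-dimensional data block in the updates tensor ends where the batch dimensions begin rather than at the last dimension, which is why the stride lookup moves from dim (num_dims - 1) to dim (num_dims - (ind_dims - 1)). A hedged sketch with byte strides modeled as a plain array:

    #include <cassert>
    #include <vector>

    // strides[i] models the byte stride of dimension i. The data block
    // stride is the stride of the first batch dimension.
    int update_block_stride(const std::vector<int> &upt_strides, int ind_dims)
    {
        const int upt_dims = static_cast<int>(upt_strides.size());
        return upt_strides[upt_dims - (ind_dims - 1)];
    }

    int main()
    {
        // float updates of shape (6, 2, 2) have byte strides {4, 24, 48};
        // with 3D indices the block covers dim 0 only, so its stride is 24.
        assert(update_block_stride({4, 24, 48}, 3) == 24);
    }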
diff --git a/tests/datasets/ScatterDataset.h b/tests/datasets/ScatterDataset.h
index 9dcf859a8f..4ad269ec85 100644
--- a/tests/datasets/ScatterDataset.h
+++ b/tests/datasets/ScatterDataset.h
@@ -179,10 +179,21 @@ public:
// NOTE: Config is src, updates, indices, output.
// NOTE: Updates/Indices tensors are now batched.
// NOTE: indices.shape.x = (src.num_dimensions - updates.num_dimensions) + k
+ // k is the number of batch dimensions
+
+ // k = 2
add_config(TensorShape(6U, 5U), TensorShape(6U, 2U, 2U), TensorShape(1U, 2U, 2U), TensorShape(6U, 5U));
- add_config(TensorShape(6U, 5U, 2U), TensorShape(6U, 2U, 2U), TensorShape(2U, 2U, 2U), TensorShape(6U, 5U, 2U));
- add_config(TensorShape(6U, 5U, 2U, 2U), TensorShape(3U, 2U), TensorShape(4U, 3U, 2U), TensorShape(6U, 5U, 2U, 2U));
- add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(6U, 2U), TensorShape(5U, 6U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 5U, 6U, 2U), TensorShape(3U, 6U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+
+ // k = 3
+ add_config(TensorShape(6U, 5U), TensorShape(6U, 2U, 2U, 2U), TensorShape(1U, 2U, 2U, 2U), TensorShape(6U, 5U));
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 5U, 3U, 6U, 2U), TensorShape(3U, 3U, 6U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+
+ // k = 4
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 6U, 2U, 3U, 2U), TensorShape(4U, 6U, 2U, 3U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+
+ // k = 5
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 3U, 4U, 3U, 2U, 2U), TensorShape(4U, 3U, 4U, 3U, 2U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
}
};
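Worked example of that relation for the k = 3, five-dimensional config above: src has 5 dimensions and the batched updates tensor TensorShape(5U, 5U, 3U, 6U, 2U) also has 5, so indices.shape.x = (5 - 5) + 3 = 3, matching TensorShape(3U, 3U, 6U, 2U).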
@@ -196,7 +207,7 @@ public:
add_config(TensorShape(9U, 3U, 4U), TensorShape(9U, 3U, 2U), TensorShape(1U, 2U), TensorShape(9U, 3U, 4U));
add_config(TensorShape(35U, 4U, 3U, 2U, 2U), TensorShape(35U, 4U), TensorShape(4U, 4U), TensorShape(35U, 4U, 3U, 2U, 2U));
add_config(TensorShape(11U, 3U, 3U, 2U, 4U), TensorShape(11U, 3U, 3U, 4U), TensorShape(2U, 4U), TensorShape(11U, 3U, 3U, 2U, 4U));
- // TODO: add_config(TensorShape(6U, 5U, 2U), TensorShape(6U, 2U, 2U), TensorShape(2U, 2U, 2U), TensorShape(6U, 5U, 2U));
+ add_config(TensorShape(6U, 5U, 2U), TensorShape(6U, 2U, 2U), TensorShape(2U, 2U, 2U), TensorShape(6U, 5U, 2U));
}
};
} // namespace datasets
diff --git a/tests/validation/CL/ScatterLayer.cpp b/tests/validation/CL/ScatterLayer.cpp
index 2970d82572..e327ff9522 100644
--- a/tests/validation/CL/ScatterLayer.cpp
+++ b/tests/validation/CL/ScatterLayer.cpp
@@ -164,10 +164,10 @@ FIXTURE_DATA_TEST_CASE(RunSmallMultiIndices, CLScatterLayerFixture<float>, frame
}
// m+k, k-1-D m+n-D case
-FIXTURE_DATA_TEST_CASE(RunSmallBatchedMultiIndices, CLScatterLayerFixture<float>, framework::DatasetMode::DISABLED,
+FIXTURE_DATA_TEST_CASE(RunSmallBatchedMultiIndices, CLScatterLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
combine(datasets::SmallScatterBatchedDataset(),
make("DataType", {DataType::F32}),
- make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add }),
+ make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add}),
make("ZeroInit", {false}),
make("Inplace", {false})))
{
diff --git a/tests/validation/fixtures/ScatterLayerFixture.h b/tests/validation/fixtures/ScatterLayerFixture.h
index 35e6b647f3..5cd9b8115c 100644
--- a/tests/validation/fixtures/ScatterLayerFixture.h
+++ b/tests/validation/fixtures/ScatterLayerFixture.h
@@ -103,7 +103,7 @@ protected:
void fill_indices(U &&tensor, int i, const TensorShape &shape)
{
// Calculate max indices the shape should contain. Add an arbitrary value to allow testing for some out of bounds values (In this case min dimension)
- const int32_t max = std::max({shape[0] , shape[1], shape[2]});
+ const int32_t max = std::min({shape[0] , shape[1], shape[2]}) + 1;
library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(-2), static_cast<int32_t>(max));
}
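Since index_len can exceed 1, an upper bound taken from the largest dimension would make most generated coordinates out of bounds for the smaller dims; bounding by the smallest dimension plus one still produces some out-of-bounds values without drowning the test in them. A standalone sketch of that bound (plain function, not the fixture's fill path):

    #include <algorithm>
    #include <cstdint>
    #include <initializer_list>

    // Model of the fixture's index bound: smallest of the first three
    // output dims, plus one, so the value `min_dim` itself (out of bounds
    // for at least one dimension) can be drawn alongside valid indices.
    int32_t index_fill_bound(std::initializer_list<int32_t> dims)
    {
        return std::min(dims) + 1;
    }

    int main()
    {
        // Output shape (6, 5, 2, ...): indices are drawn between -2 and 3.
        return index_fill_bound({6, 5, 2}) == 3 ? 0 : 1;
    }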
@@ -197,12 +197,13 @@ protected:
TensorShape src_shape = a_shape;
TensorShape updates_shape = b_shape;
TensorShape indices_shape = c_shape;
+ const int num_ind_dims = c_shape.num_dimensions();
// 1. Collapse batch index into a single dim if necessary for update tensor and indices tensor.
- if(c_shape.num_dimensions() >= 3)
+ if(num_ind_dims >= 3)
{
indices_shape = indices_shape.collapsed_from(1);
- updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - 2); // Collapses from last 2 dims
+ updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - (num_ind_dims - 1)); // Collapses batch dims
}
// 2. Collapse data dims into a single dim.
@@ -212,16 +213,16 @@ protected:
updates_shape.collapse(updates_shape.num_dimensions() - 1); // Collapse data dims (all except last dim which is batch dim)
// Create reference tensors
- SimpleTensor<T> src{ a_shape, data_type, 1, a_qinfo };
- SimpleTensor<T> updates{b_shape, data_type, 1, QuantizationInfo() };
- SimpleTensor<int32_t> indices{ c_shape, DataType::S32, 1, QuantizationInfo() };
+ SimpleTensor<T> src{ src_shape, data_type, 1, a_qinfo };
+ SimpleTensor<T> updates{updates_shape, data_type, 1, QuantizationInfo() };
+ SimpleTensor<int32_t> indices{ indices_shape, DataType::S32, 1, QuantizationInfo() };
// Fill reference
fill(src, 0 + _hash);
fill(updates, 1 + _hash);
fill_indices(indices, 2 + _hash, out_shape);
- // Calculate individual reference.
+ // Calculate individual reference using collapsed shapes
return reference::scatter_layer<T>(src, updates, indices, out_shape, info);
}
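The reference path therefore operates on collapsed shapes: all batch dims folded into one, then all data dims folded into one. A standalone sketch of the two-step collapse (helper names are illustrative; vectors stand in for TensorShape):

    #include <cassert>
    #include <vector>

    // Fold dims [axis, end) into one (models TensorShape::collapsed_from).
    std::vector<int> collapsed_from(std::vector<int> s, int axis)
    {
        int f = 1;
        for (int i = axis; i < static_cast<int>(s.size()); ++i)
            f *= s[i];
        s.resize(axis);
        s.push_back(f);
        return s;
    }

    // Fold the first n dims into one (models TensorShape::collapse(n)).
    std::vector<int> collapse_front(std::vector<int> s, int n)
    {
        int f = 1;
        for (int i = 0; i < n; ++i)
            f *= s[i];
        std::vector<int> out{f};
        out.insert(out.end(), s.begin() + n, s.end());
        return out;
    }

    int main()
    {
        // k = 3 config: updates (5, 5, 3, 6, 2), indices (3, 3, 6, 2).
        std::vector<int> upd{5, 5, 3, 6, 2}, ind{3, 3, 6, 2};
        const int ind_dims = static_cast<int>(ind.size());
        // Step 1: fold the batch dims (all indices dims past the first).
        ind = collapsed_from(ind, 1);                                             // -> (3, 36)
        upd = collapsed_from(upd, static_cast<int>(upd.size()) - (ind_dims - 1)); // -> (5, 5, 36)
        // Step 2: fold the data dims (everything except the batch dim).
        upd = collapse_front(upd, static_cast<int>(upd.size()) - 1);              // -> (25, 36)
        assert((ind == std::vector<int>{3, 36}));
        assert((upd == std::vector<int>{25, 36}));
    }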
diff --git a/tests/validation/reference/ScatterLayer.cpp b/tests/validation/reference/ScatterLayer.cpp
index c9e6035e14..55c48a9002 100644
--- a/tests/validation/reference/ScatterLayer.cpp
+++ b/tests/validation/reference/ScatterLayer.cpp
@@ -63,6 +63,7 @@ T reduce_op(const T &current,const T &update,const ScatterFunction func)
}
template float reduce_op(const float &current,const float &update,const ScatterFunction func);
+template half reduce_op(const half &current,const half &update,const ScatterFunction func);
}
// NOTE: This function expects collapsed tensors as input.
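For reference, a minimal standalone sketch of what the newly instantiated reduce_op computes per scatter function (own enum, restricted to the Update and Add functions exercised by the tests above; plain floats stand in for half):

    #include <cassert>
    #include <stdexcept>

    enum class ScatterFunction { Update, Add }; // subset used in the tests

    // Combine the current output value with an update value.
    template <typename T>
    T reduce_op(const T &current, const T &update, ScatterFunction func)
    {
        switch (func)
        {
            case ScatterFunction::Update: return update;
            case ScatterFunction::Add:    return current + update;
            default: throw std::runtime_error("Unsupported scatter function");
        }
    }

    int main()
    {
        assert(reduce_op(1.0f, 2.0f, ScatterFunction::Add) == 3.0f);
        assert(reduce_op(1.0f, 2.0f, ScatterFunction::Update) == 2.0f);
    }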