From 2fea13593a4753316ae488edf489cb4b00150153 Mon Sep 17 00:00:00 2001
From: Mohammed Suhail Munshi
Date: Mon, 29 Apr 2024 22:53:58 +0100
Subject: Add batched indices support to Scatter GPU Implementation

Resolves: [COMPMID-6897]
Signed-off-by: Mohammed Suhail Munshi
Change-Id: I70b1c3c5f0de8484fcb6c3b0cc0d0d8c059b0f58
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11525
Comments-Addressed: Arm Jenkins
Reviewed-by: Gunes Bayir
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 tests/validation/CL/ScatterLayer.cpp            |  4 ++--
 tests/validation/fixtures/ScatterLayerFixture.h | 15 ++++++++-------
 tests/validation/reference/ScatterLayer.cpp     |  1 +
 3 files changed, 11 insertions(+), 9 deletions(-)

(limited to 'tests/validation')

diff --git a/tests/validation/CL/ScatterLayer.cpp b/tests/validation/CL/ScatterLayer.cpp
index 2970d82572..e327ff9522 100644
--- a/tests/validation/CL/ScatterLayer.cpp
+++ b/tests/validation/CL/ScatterLayer.cpp
@@ -164,10 +164,10 @@ FIXTURE_DATA_TEST_CASE(RunSmallMultiIndices, CLScatterLayerFixture<float>, frame
 }
 
 // m+k, k-1-D m+n-D case
-FIXTURE_DATA_TEST_CASE(RunSmallBatchedMultiIndices, CLScatterLayerFixture<float>, framework::DatasetMode::DISABLED,
+FIXTURE_DATA_TEST_CASE(RunSmallBatchedMultiIndices, CLScatterLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
     combine(datasets::SmallScatterBatchedDataset(),
             make("DataType", {DataType::F32}),
-            make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add }),
+            make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add}),
             make("ZeroInit", {false}),
             make("Inplace", {false})))
 {
diff --git a/tests/validation/fixtures/ScatterLayerFixture.h b/tests/validation/fixtures/ScatterLayerFixture.h
index 35e6b647f3..5cd9b8115c 100644
--- a/tests/validation/fixtures/ScatterLayerFixture.h
+++ b/tests/validation/fixtures/ScatterLayerFixture.h
@@ -103,7 +103,7 @@ protected:
     void fill_indices(U &&tensor, int i, const TensorShape &shape)
     {
         // Calculate max indices the shape should contain. Add an arbitrary value to allow testing for some out of bounds values (In this case min dimension)
-        const int32_t max = std::max({shape[0] , shape[1], shape[2]});
+        const int32_t max = std::min({shape[0] , shape[1], shape[2]}) + 1;
         library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(-2), static_cast<int32_t>(max));
     }
@@ -197,12 +197,13 @@ protected:
         TensorShape src_shape     = a_shape;
         TensorShape updates_shape = b_shape;
         TensorShape indices_shape = c_shape;
+        const int num_ind_dims    = c_shape.num_dimensions();
 
         // 1. Collapse batch index into a single dim if necessary for update tensor and indices tensor.
-        if(c_shape.num_dimensions() >= 3)
+        if(num_ind_dims >= 3)
         {
             indices_shape = indices_shape.collapsed_from(1);
-            updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - 2); // Collapses from last 2 dims
+            updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - (num_ind_dims -1)); // Collapses batch dims
         }
 
         // 2. Collapse data dims into a single dim.
@@ -212,16 +213,16 @@ protected:
         updates_shape.collapse(updates_shape.num_dimensions() - 1); // Collapse data dims (all except last dim which is batch dim)
 
         // Create reference tensors
-        SimpleTensor<T> src{ a_shape, data_type, 1, a_qinfo };
-        SimpleTensor<T> updates{b_shape, data_type, 1, QuantizationInfo() };
-        SimpleTensor<int32_t> indices{ c_shape, DataType::S32, 1, QuantizationInfo() };
+        SimpleTensor<T> src{ src_shape, data_type, 1, a_qinfo };
+        SimpleTensor<T> updates{updates_shape, data_type, 1, QuantizationInfo() };
+        SimpleTensor<int32_t> indices{ indices_shape, DataType::S32, 1, QuantizationInfo() };
 
         // Fill reference
         fill(src, 0 + _hash);
         fill(updates, 1 + _hash);
         fill_indices(indices, 2 + _hash, out_shape);
 
-        // Calculate individual reference.
+        // Calculate individual reference using collapsed shapes
         return reference::scatter_layer(src, updates, indices, out_shape, info);
     }
diff --git a/tests/validation/reference/ScatterLayer.cpp b/tests/validation/reference/ScatterLayer.cpp
index c9e6035e14..55c48a9002 100644
--- a/tests/validation/reference/ScatterLayer.cpp
+++ b/tests/validation/reference/ScatterLayer.cpp
@@ -63,6 +63,7 @@ T reduce_op(const T &current,const T &update,const ScatterFunction func)
 }
 
 template float reduce_op(const float &current,const float &update,const ScatterFunction func);
+template half reduce_op(const half &current,const half &update,const ScatterFunction func);
 }
 
 // NOTE: This function expects collapsed tensors as input.
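Note on the ScatterLayerFixture.h change above: step 1 folds the dims of the indices tensor above dim 0 (index count plus batch dims) into a single dim, and folds the matching trailing dims of the updates tensor, so reference::scatter_layer() only ever sees collapsed shapes, as the NOTE in reference/ScatterLayer.cpp requires. The standalone sketch below mirrors that collapse arithmetic with plain std::vector shapes; collapsed_from() here is a hypothetical stand-in for arm_compute::TensorShape::collapsed_from(), and the example shapes are invented for illustration, not taken from SmallScatterBatchedDataset().

#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical stand-in for TensorShape::collapsed_from(start):
// folds dimension `start` and everything above it into one dimension.
static std::vector<std::size_t> collapsed_from(const std::vector<std::size_t> &shape, std::size_t start)
{
    std::vector<std::size_t> out(shape.begin(), shape.begin() + start);
    out.push_back(std::accumulate(shape.begin() + start, shape.end(),
                                  std::size_t{1}, std::multiplies<std::size_t>()));
    return out;
}

int main()
{
    // Invented batched example: the indices tensor carries 2 coordinates per entry,
    // 4 indices and 3 batches; the updates tensor carries matching trailing dims.
    std::vector<std::size_t> indices_shape = {2, 4, 3};    // (coords, indices, batches)
    std::vector<std::size_t> updates_shape = {6, 5, 4, 3}; // (data dims 6 and 5, indices, batches)

    const std::size_t num_ind_dims = indices_shape.size();
    if(num_ind_dims >= 3)
    {
        // Same arithmetic as the fixture: fold index dims above dim 0 ...
        indices_shape = collapsed_from(indices_shape, 1); // (2, 4, 3) -> (2, 12)
        // ... and fold the trailing dims of the updates that correspond to them.
        updates_shape = collapsed_from(updates_shape, updates_shape.size() - (num_ind_dims - 1)); // (6, 5, 4, 3) -> (6, 5, 12)
    }

    for(std::size_t d : indices_shape) { std::cout << d << ' '; } // prints: 2 12
    std::cout << '\n';
    for(std::size_t d : updates_shape) { std::cout << d << ' '; } // prints: 6 5 12
    std::cout << '\n';
    return 0;
}

After the fold, each batch's indices and updates line up along one collapsed dimension, which is why the fixture can pass src_shape, updates_shape and indices_shape straight to the collapsed-shape reference implementation.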