author    Viet-Hoa Do <viet-hoa.do@arm.com>    2023-01-03 17:59:14 +0000
committer Viet-Hoa Do <viet-hoa.do@arm.com>    2023-01-06 13:45:20 +0000
commit    b3077fbaee868579f9a41888fef1f71286d6757c (patch)
tree      23e6a6c63dc860697ae8e9301da7ddbb29d62c98 /tests
parent    3558c5840e7c973e2b1a86ae3a9335b44cad59d4 (diff)
download  ComputeLibrary-b3077fbaee868579f9a41888fef1f71286d6757c.tar.gz
LHS broadcasting addition for dynamic fusion
* The binary elementwise operator can now broadcast in the X dimension,
  the Y and Z dimensions, or both, on either the LHS or RHS operand.
* Fix a bug in the CL code to support batching.

Resolves: COMPMID-5704
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I51b04986d30861f255ca9f754adffa0e6c85a26b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8898
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Ramy Elgammal <ramy.elgammal@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Dynamic-Fusion: Ramy Elgammal <ramy.elgammal@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
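For reference, the broadcasting rule described above can be illustrated with a small standalone check. The helper below is a minimal sketch, not Compute Library code: the name broadcast_compatible is hypothetical, shapes are written as {X, Y, Z, batch} to match the TensorShape ordering used in the dataset, and the rule encoded is one plausible reading of the commit message and the test shapes in this patch (X may broadcast alone, Y and Z only as a pair, the batch dimension must match exactly).

#include <array>
#include <cassert>
#include <cstddef>

// Hypothetical helper, not part of Compute Library: checks whether two 4D
// shapes (X, Y, Z, batch) are compatible under the broadcasting rule
// described in the commit message. A dimension of size 1 on one operand
// broadcasts against the other; X may broadcast alone, Y and Z only
// together, and the batch dimension is never broadcast.
bool broadcast_compatible(const std::array<std::size_t, 4> &lhs,
                          const std::array<std::size_t, 4> &rhs)
{
    // Batch dimension must always match.
    if(lhs[3] != rhs[3])
    {
        return false;
    }

    // X: either equal, or one side is 1.
    const bool x_ok = (lhs[0] == rhs[0]) || lhs[0] == 1 || rhs[0] == 1;

    // Y and Z: either both equal, or one operand has both set to 1.
    const bool yz_ok = (lhs[1] == rhs[1] && lhs[2] == rhs[2]) ||
                       (lhs[1] == 1 && lhs[2] == 1) ||
                       (rhs[1] == 1 && rhs[2] == 1);

    return x_ok && yz_ok;
}

int main()
{
    // Accepted pairs, taken from the new ShapeDatasets.h entries below.
    assert(broadcast_compatible({ 1, 3, 4, 2 }, { 5, 3, 4, 2 }));  // LHS broadcast X
    assert(broadcast_compatible({ 7, 1, 1, 4 }, { 7, 2, 3, 4 }));  // LHS broadcast Y, Z
    assert(broadcast_compatible({ 2, 6, 4, 3 }, { 1, 1, 1, 3 }));  // RHS broadcast X, Y, Z

    // Rejected cases mirroring the updated Add.cpp validation test
    // (3D shapes padded with a batch of 1 for this sketch).
    assert(!broadcast_compatible({ 15, 23, 3, 1 }, { 15, 1, 3, 1 }));  // Y alone not allowed
    assert(!broadcast_compatible({ 3, 8, 9, 1 }, { 3, 8, 1, 1 }));     // Z alone not allowed

    return 0;
}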
Diffstat (limited to 'tests')
-rw-r--r--  tests/datasets/ShapeDatasets.h                  | 18
-rw-r--r--  tests/validation/dynamic_fusion/gpu/cl/Add.cpp  | 16
2 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 047457c99e..c1e61444a8 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -308,13 +308,21 @@ public:
: ZipDataset<ShapeDataset, ShapeDataset>(
ShapeDataset("Shape0",
{
- TensorShape{ 9U, 9U, 5U },
- TensorShape{ 27U, 13U, 2U },
+ TensorShape{ 1U, 3U, 4U, 2U }, // LHS broadcast X
+ TensorShape{ 6U, 4U, 2U, 3U }, // RHS broadcast X
+ TensorShape{ 7U, 1U, 1U, 4U }, // LHS broadcast Y, Z
+ TensorShape{ 8U, 5U, 6U, 3U }, // RHS broadcast Y, Z
+ TensorShape{ 1U, 1U, 1U, 2U }, // LHS broadcast X, Y, Z
+ TensorShape{ 2U, 6U, 4U, 3U }, // RHS broadcast X, Y, Z
}),
ShapeDataset("Shape1",
{
- TensorShape{ 1U, 1U, 1U }, // Broadcast in X, Y, Z
- TensorShape{ 27U, 1U, 1U }, // Broadcast in Y and Z
+ TensorShape{ 5U, 3U, 4U, 2U },
+ TensorShape{ 1U, 4U, 2U, 3U },
+ TensorShape{ 7U, 2U, 3U, 4U },
+ TensorShape{ 8U, 1U, 1U, 3U },
+ TensorShape{ 4U, 7U, 3U, 2U },
+ TensorShape{ 1U, 1U, 1U, 3U },
}))
{
}
diff --git a/tests/validation/dynamic_fusion/gpu/cl/Add.cpp b/tests/validation/dynamic_fusion/gpu/cl/Add.cpp
index 3743fbb664..1451ab3de8 100644
--- a/tests/validation/dynamic_fusion/gpu/cl/Add.cpp
+++ b/tests/validation/dynamic_fusion/gpu/cl/Add.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,9 +54,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), // S16 is valid data type for Add
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32), // S32 is valid data type for Add
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 1U, 1U), 1, DataType::F32), // Broadcasting not allowed for lhs
+ TensorInfo(TensorShape(32U, 1U, 1U), 1, DataType::F32), // Broadcasting allowed for lhs
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U, 2), 1, DataType::F32), // Batching not supported
+ TensorInfo(TensorShape(15U, 23U, 3U), 1, DataType::F32), // Broadcast Y dimension is not allowed
+ TensorInfo(TensorShape( 3U, 8U, 9U), 1, DataType::S16), // Broadcast Z dimension is not allowed
+ TensorInfo(TensorShape(32U, 13U, 2U, 2), 1, DataType::F32), // Batching is allowed
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
@@ -65,7 +67,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 1U, 1U), 1, DataType::F32), // Broadcasting allowed for rhs
- TensorInfo(TensorShape(32U, 13U, 2U, 2), 1, DataType::F32), // Batching not supported
+ TensorInfo(TensorShape(15U, 1U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape( 3U, 8U, 1U), 1, DataType::S16),
+ TensorInfo(TensorShape(32U, 13U, 2U, 2), 1, DataType::F32),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
@@ -74,9 +78,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(15U, 23U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape( 3U, 8U, 9U), 1, DataType::S16),
TensorInfo(TensorShape(32U, 13U, 2U, 2), 1, DataType::F32),
})),
- framework::dataset::make("Expected", { true, false, true, true, false, false, true, false})),
+ framework::dataset::make("Expected", { true, false, true, true, false, true, true, false, false, true})),
input1_info, input2_info, output_info, expected)
{
// Create a new workload sketch