aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
Diffstat (limited to 'tests')
-rw-r--r--tests/datasets/ScatterDataset.h14
-rw-r--r--tests/validation/CL/ScatterLayer.cpp52
-rw-r--r--tests/validation/NEON/SoftmaxLayer.cpp2
-rw-r--r--tests/validation/NEON/UNIT/RuntimeContext.cpp15
-rw-r--r--tests/validation/fixtures/ScatterLayerFixture.h23
5 files changed, 81 insertions, 25 deletions
diff --git a/tests/datasets/ScatterDataset.h b/tests/datasets/ScatterDataset.h
index 4ad269ec85..8fd4448d2d 100644
--- a/tests/datasets/ScatterDataset.h
+++ b/tests/datasets/ScatterDataset.h
@@ -180,7 +180,6 @@ public:
// NOTE: Updates/Indices tensors are now batched.
// NOTE: indices.shape.x = (updates_batched) ? (src.num_dimensions - updates.num_dimensions) + 2 : (src.num_dimensions - updates.num_dimensions) + 1
// k is the number of batch dimensions
-
// k = 2
add_config(TensorShape(6U, 5U), TensorShape(6U, 2U, 2U), TensorShape(1U, 2U, 2U), TensorShape(6U, 5U));
add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 5U, 6U, 2U), TensorShape(3U, 6U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
@@ -197,6 +196,18 @@ public:
}
};
+class SmallScatterScalarDataset final : public ScatterDataset
+{
+public:
+ // batched scalar case
+ SmallScatterScalarDataset()
+ {
+ add_config(TensorShape(6U, 5U), TensorShape(6U), TensorShape(2U, 6U), TensorShape(6U, 5U));
+ add_config(TensorShape(6U, 5U), TensorShape(6U, 6U), TensorShape(2U, 6U, 6U), TensorShape(6U, 5U));
+ add_config(TensorShape(3U, 3U, 6U, 5U), TensorShape(6U, 6U), TensorShape(4U, 6U, 6U), TensorShape(3U, 3U, 6U, 5U));
+ }
+};
+
// This dataset is for data types that does not require full testing. It contains selected tests from the above.
class SmallScatterMixedDataset final : public ScatterDataset
{
@@ -205,6 +216,7 @@ public:
{
add_config(TensorShape(10U), TensorShape(2U), TensorShape(1U, 2U), TensorShape(10U));
add_config(TensorShape(9U, 3U, 4U), TensorShape(9U, 3U, 2U), TensorShape(1U, 2U), TensorShape(9U, 3U, 4U));
+ add_config(TensorShape(6U, 5U), TensorShape(6U, 6U), TensorShape(2U, 6U, 6U), TensorShape(6U, 5U));
add_config(TensorShape(35U, 4U, 3U, 2U, 2U), TensorShape(35U, 4U), TensorShape(4U, 4U), TensorShape(35U, 4U, 3U, 2U, 2U));
add_config(TensorShape(11U, 3U, 3U, 2U, 4U), TensorShape(11U, 3U, 3U, 4U), TensorShape(2U, 4U), TensorShape(11U, 3U, 3U, 2U, 4U));
add_config(TensorShape(6U, 5U, 2U), TensorShape(6U, 2U, 2U), TensorShape(2U, 2U, 2U), TensorShape(6U, 5U, 2U));
diff --git a/tests/validation/CL/ScatterLayer.cpp b/tests/validation/CL/ScatterLayer.cpp
index e327ff9522..b1531eb64a 100644
--- a/tests/validation/CL/ScatterLayer.cpp
+++ b/tests/validation/CL/ScatterLayer.cpp
@@ -125,7 +125,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLScatterLayerFixture<float>, framework::Datase
make("DataType", {DataType::F32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -136,7 +137,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallZeroInit, CLScatterLayerFixture<float>, framework
make("DataType", {DataType::F32}),
make("ScatterFunction", {ScatterFunction::Add}),
make("ZeroInit", {true}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -147,7 +149,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMultiDim, CLScatterLayerFixture<float>, framework
make("DataType", {DataType::F32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -158,7 +161,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMultiIndices, CLScatterLayerFixture<float>, frame
make("DataType", {DataType::F32}),
make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add }),
make("ZeroInit", {false}),
- make("Inplace", {false, true})))
+ make("Inplace", {false, true}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -169,20 +173,38 @@ FIXTURE_DATA_TEST_CASE(RunSmallBatchedMultiIndices, CLScatterLayerFixture<float>
make("DataType", {DataType::F32}),
make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add}),
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {true})))
+{
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+// m+k, k-1-D m+n-D case
+FIXTURE_DATA_TEST_CASE(RunSmallScatterScalar, CLScatterLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallScatterScalarDataset(),
+ make("DataType", {DataType::F32}),
+ make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add}),
+ make("ZeroInit", {false}),
+ make("Inplace", {false}),
+ make("Padding", {false}))) // NOTE: Padding not supported in this dataset
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
TEST_SUITE_END() // FP32
+
+// NOTE: Padding is disabled for the SmallScatterMixedDataset due to certain shapes not supporting padding.
+// Padding is well tested in F32 Datatype test cases.
+
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
combine(datasets::SmallScatterMixedDataset(),
make("DataType", {DataType::F16}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_f16);
}
@@ -196,7 +218,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<int32_t>, framework:
make("DataType", {DataType::S32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -208,7 +231,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<int16_t>, framework:
make("DataType", {DataType::S16}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -220,7 +244,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<int8_t>, framework::
make("DataType", {DataType::S8}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -232,7 +257,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<uint32_t>, framework
make("DataType", {DataType::U32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -244,7 +270,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<uint16_t>, framework
make("DataType", {DataType::U16}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -256,7 +283,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<uint8_t>, framework:
make("DataType", {DataType::U8}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index 8da5a0d953..94d0866c38 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -145,7 +145,7 @@ DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL,
cpu_isa.fp16 = (data_type == DataType::F16);
const auto *selected_impl = CpuSoftmaxKernel::get_implementation(
- SoftmaxKernelDataTypeISASelectorData{ data_type, cpu_isa, false /* is_log */, 0 /* axis */},
+ SoftmaxKernelDataTypeISASelectorData{ data_type, cpu_isa, false /* is_log */, 0 /* axis */, CPUInfo::get().get_sme2_vector_length()},
cpu::KernelSelectionType::Preferred);
ARM_COMPUTE_ERROR_ON_NULLPTR(selected_impl);
diff --git a/tests/validation/NEON/UNIT/RuntimeContext.cpp b/tests/validation/NEON/UNIT/RuntimeContext.cpp
index 819811943d..e0d45c639a 100644
--- a/tests/validation/NEON/UNIT/RuntimeContext.cpp
+++ b/tests/validation/NEON/UNIT/RuntimeContext.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,6 +48,19 @@ namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(UNIT)
+#if defined(ARM_COMPUTE_OPENMP_SCHEDULER) && !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)
+TEST_CASE(CpuCapacity, framework::DatasetMode::ALL)
+{
+ CPUInfo& ci = arm_compute::Scheduler::get().cpu_info();
+ const uint32_t nonlittle_num_cpus = ci.get_cpu_num_excluding_little();
+ const uint32_t num_threads = arm_compute::Scheduler::get().num_threads();
+
+ ARM_COMPUTE_EXPECT(num_threads<=nonlittle_num_cpus , framework::LogLevel::ERRORS);
+}
+#endif /* defined(ARM_COMPUTE_OPENMP_SCHEDULER) && !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)*/
+
TEST_SUITE(RuntimeContext)
TEST_CASE(Scheduler, framework::DatasetMode::ALL)
diff --git a/tests/validation/fixtures/ScatterLayerFixture.h b/tests/validation/fixtures/ScatterLayerFixture.h
index 5cd9b8115c..af161ef98b 100644
--- a/tests/validation/fixtures/ScatterLayerFixture.h
+++ b/tests/validation/fixtures/ScatterLayerFixture.h
@@ -48,7 +48,7 @@ class ScatterGenericValidationFixture : public framework::Fixture
{
public:
void setup(TensorShape src_shape, TensorShape updates_shape, TensorShape indices_shape,
- TensorShape out_shape, DataType data_type, ScatterInfo scatter_info, bool inplace,
+ TensorShape out_shape, DataType data_type, ScatterInfo scatter_info, bool inplace, bool padding,
QuantizationInfo src_qinfo = QuantizationInfo(), QuantizationInfo o_qinfo = QuantizationInfo())
{
// this is for improving randomness across tests
@@ -57,7 +57,7 @@ public:
+ updates_shape[4] + updates_shape[5]
+ indices_shape[0] + indices_shape[1] + indices_shape[2] + indices_shape[3];
- _target = compute_target(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, inplace, src_qinfo, o_qinfo);
+ _target = compute_target(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, inplace, padding, src_qinfo, o_qinfo);
_reference = compute_reference(src_shape, updates_shape, indices_shape, out_shape, data_type,scatter_info, src_qinfo , o_qinfo);
}
@@ -104,11 +104,11 @@ protected:
{
// Calculate max indices the shape should contain. Add an arbitrary value to allow testing for some out of bounds values (In this case min dimension)
const int32_t max = std::min({shape[0] , shape[1], shape[2]}) + 1;
- library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(-2), static_cast<int32_t>(max));
+ library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(0), static_cast<int32_t>(max));
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- const TensorShape &out_shape, DataType data_type, const ScatterInfo info, bool inplace,
+ const TensorShape &out_shape, DataType data_type, const ScatterInfo info, bool inplace, bool padding,
QuantizationInfo a_qinfo, QuantizationInfo o_qinfo)
{
// 1. Create relevant tensors using ScatterInfo data structure.
@@ -146,11 +146,14 @@ protected:
ARM_COMPUTE_ASSERT(indices.info()->is_resizable());
ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
- add_padding_x({ &src, &updates, &indices});
-
- if(!inplace)
+ if(padding)
{
- add_padding_x({ &dst });
+ add_padding_x({ &src, &updates, &indices});
+
+ if(!inplace)
+ {
+ add_padding_x({ &dst });
+ }
}
// Allocate tensors
@@ -237,10 +240,10 @@ class ScatterValidationFixture : public ScatterGenericValidationFixture<TensorTy
{
public:
void setup(TensorShape src_shape, TensorShape update_shape, TensorShape indices_shape,
- TensorShape out_shape, DataType data_type, ScatterFunction func, bool zero_init, bool inplace)
+ TensorShape out_shape, DataType data_type, ScatterFunction func, bool zero_init, bool inplace, bool padding)
{
ScatterGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, update_shape,
- indices_shape, out_shape, data_type, ScatterInfo(func, zero_init), inplace,
+ indices_shape, out_shape, data_type, ScatterInfo(func, zero_init), inplace, padding,
QuantizationInfo(), QuantizationInfo());
}
};