Diffstat (limited to 'tests')
 tests/datasets/ScatterDataset.h                 | 31
 tests/validation/CL/ScatterLayer.cpp            | 56
 tests/validation/NEON/SoftmaxLayer.cpp          |  2
 tests/validation/NEON/UNIT/RuntimeContext.cpp   | 20
 tests/validation/fixtures/ScatterLayerFixture.h | 38
 tests/validation/reference/ScatterLayer.cpp     |  1
 6 files changed, 111 insertions(+), 37 deletions(-)
diff --git a/tests/datasets/ScatterDataset.h b/tests/datasets/ScatterDataset.h
index 9dcf859a8f..8fd4448d2d 100644
--- a/tests/datasets/ScatterDataset.h
+++ b/tests/datasets/ScatterDataset.h
@@ -179,10 +179,32 @@ public:
// NOTE: Config is src, updates, indices, output.
// NOTE: Updates/Indices tensors are now batched.
// NOTE: indices.shape.x = (updates_batched) ? (src.num_dimensions - updates.num_dimensions) + 2 : (src.num_dimensions - updates.num_dimensions) + 1
+ // k is the number of batch dimensions
+ // k = 2
add_config(TensorShape(6U, 5U), TensorShape(6U, 2U, 2U), TensorShape(1U, 2U, 2U), TensorShape(6U, 5U));
- add_config(TensorShape(6U, 5U, 2U), TensorShape(6U, 2U, 2U), TensorShape(2U, 2U, 2U), TensorShape(6U, 5U, 2U));
- add_config(TensorShape(6U, 5U, 2U, 2U), TensorShape(3U, 2U), TensorShape(4U, 3U, 2U), TensorShape(6U, 5U, 2U, 2U));
- add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(6U, 2U), TensorShape(5U, 6U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 5U, 6U, 2U), TensorShape(3U, 6U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+
+ // k = 3
+ add_config(TensorShape(6U, 5U), TensorShape(6U, 2U, 2U, 2U), TensorShape(1U, 2U, 2U, 2U), TensorShape(6U, 5U));
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 5U, 3U, 6U, 2U), TensorShape(3U, 3U, 6U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+
+ // k = 4
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 6U, 2U, 3U, 2U), TensorShape(4U, 6U, 2U, 3U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+
+ // k = 5
+ add_config(TensorShape(5U, 5U, 4U, 2U, 2U), TensorShape(5U, 3U, 4U, 3U, 2U, 2U), TensorShape(4U, 3U, 4U, 3U, 2U, 2U), TensorShape(5U, 5U, 4U, 2U, 2U));
+ }
+};
+
+class SmallScatterScalarDataset final : public ScatterDataset
+{
+public:
+ // batched scalar case
+ SmallScatterScalarDataset()
+ {
+ add_config(TensorShape(6U, 5U), TensorShape(6U), TensorShape(2U, 6U), TensorShape(6U, 5U));
+ add_config(TensorShape(6U, 5U), TensorShape(6U, 6U), TensorShape(2U, 6U, 6U), TensorShape(6U, 5U));
+ add_config(TensorShape(3U, 3U, 6U, 5U), TensorShape(6U, 6U), TensorShape(4U, 6U, 6U), TensorShape(3U, 3U, 6U, 5U));
}
};
@@ -194,9 +216,10 @@ public:
{
add_config(TensorShape(10U), TensorShape(2U), TensorShape(1U, 2U), TensorShape(10U));
add_config(TensorShape(9U, 3U, 4U), TensorShape(9U, 3U, 2U), TensorShape(1U, 2U), TensorShape(9U, 3U, 4U));
+ add_config(TensorShape(6U, 5U), TensorShape(6U, 6U), TensorShape(2U, 6U, 6U), TensorShape(6U, 5U));
add_config(TensorShape(35U, 4U, 3U, 2U, 2U), TensorShape(35U, 4U), TensorShape(4U, 4U), TensorShape(35U, 4U, 3U, 2U, 2U));
add_config(TensorShape(11U, 3U, 3U, 2U, 4U), TensorShape(11U, 3U, 3U, 4U), TensorShape(2U, 4U), TensorShape(11U, 3U, 3U, 2U, 4U));
- // TODO: add_config(TensorShape(6U, 5U, 2U), TensorShape(6U, 2U, 2U), TensorShape(2U, 2U, 2U), TensorShape(6U, 5U, 2U));
+ add_config(TensorShape(6U, 5U, 2U), TensorShape(6U, 2U, 2U), TensorShape(2U, 2U, 2U), TensorShape(6U, 5U, 2U));
}
};
} // namespace datasets
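
A quick sanity check of the indices-width rule noted in the dataset comment: reading the new configs, with k batch dimensions the width generalises to indices.shape.x = src.num_dimensions() - (updates.num_dimensions() - k), which collapses to the existing "+2"/"+1" formula for the batched (k = 2) and unbatched cases. A minimal sketch verifying this against two of the added entries (the generalised formula is an assumption drawn from the configs, not something stated by the patch itself):

    #include <cassert>

    // indices_x: expected width of the innermost indices dimension, assuming
    // k batch dimensions shared between the updates and indices tensors.
    constexpr unsigned int indices_x(unsigned int src_dims, unsigned int updates_dims, unsigned int k)
    {
        return src_dims - (updates_dims - k);
    }

    int main()
    {
        // k = 3: src(5,5,4,2,2), updates(5,5,3,6,2), indices(3,3,6,2) -> width 3
        assert(indices_x(5, 5, 3) == 3);
        // k = 5: src(5,5,4,2,2), updates(5,3,4,3,2,2), indices(4,3,4,3,2,2) -> width 4
        assert(indices_x(5, 6, 5) == 4);
        return 0;
    }
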
diff --git a/tests/validation/CL/ScatterLayer.cpp b/tests/validation/CL/ScatterLayer.cpp
index 2970d82572..b1531eb64a 100644
--- a/tests/validation/CL/ScatterLayer.cpp
+++ b/tests/validation/CL/ScatterLayer.cpp
@@ -125,7 +125,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLScatterLayerFixture<float>, framework::Datase
make("DataType", {DataType::F32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -136,7 +137,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallZeroInit, CLScatterLayerFixture<float>, framework
make("DataType", {DataType::F32}),
make("ScatterFunction", {ScatterFunction::Add}),
make("ZeroInit", {true}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -147,7 +149,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMultiDim, CLScatterLayerFixture<float>, framework
make("DataType", {DataType::F32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -158,31 +161,50 @@ FIXTURE_DATA_TEST_CASE(RunSmallMultiIndices, CLScatterLayerFixture<float>, frame
make("DataType", {DataType::F32}),
make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add }),
make("ZeroInit", {false}),
- make("Inplace", {false, true})))
+ make("Inplace", {false, true}),
+ make("Padding", {true})))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
// m+k, k-1-D m+n-D case
-FIXTURE_DATA_TEST_CASE(RunSmallBatchedMultiIndices, CLScatterLayerFixture<float>, framework::DatasetMode::DISABLED,
+FIXTURE_DATA_TEST_CASE(RunSmallBatchedMultiIndices, CLScatterLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
combine(datasets::SmallScatterBatchedDataset(),
make("DataType", {DataType::F32}),
- make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add }),
+ make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add}),
+ make("ZeroInit", {false}),
+ make("Inplace", {false}),
+ make("Padding", {true})))
+{
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+// Batched scalar case: each update is a scalar, so the indices address every output dimension
+FIXTURE_DATA_TEST_CASE(RunSmallScatterScalar, CLScatterLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallScatterScalarDataset(),
+ make("DataType", {DataType::F32}),
+ make("ScatterFunction", {ScatterFunction::Update, ScatterFunction::Add}),
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false}))) // NOTE: Padding not supported in this datset
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
TEST_SUITE_END() // FP32
+
+// NOTE: Padding is disabled for the SmallScatterMixedDataset due to certain shapes not supporting padding.
+// Padding is already well covered by the F32 data type test cases.
+
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
combine(datasets::SmallScatterMixedDataset(),
make("DataType", {DataType::F16}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_f16);
}
@@ -196,7 +218,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<int32_t>, framework:
make("DataType", {DataType::S32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -208,7 +231,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<int16_t>, framework:
make("DataType", {DataType::S16}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -220,7 +244,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<int8_t>, framework::
make("DataType", {DataType::S8}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -232,7 +257,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<uint32_t>, framework
make("DataType", {DataType::U32}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -244,7 +270,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<uint16_t>, framework
make("DataType", {DataType::U16}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
@@ -256,7 +283,8 @@ FIXTURE_DATA_TEST_CASE(RunSmallMixed, CLScatterLayerFixture<uint8_t>, framework:
make("DataType", {DataType::U8}),
allScatterFunctions,
make("ZeroInit", {false}),
- make("Inplace", {false})))
+ make("Inplace", {false}),
+ make("Padding", {false})))
{
validate(CLAccessor(_target), _reference, tolerance_int);
}
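
For orientation, the new make("Padding", {...}) value is simply one more element of the combined dataset tuple: it is forwarded by ScatterValidationFixture::setup(...) into the generic fixture and on to compute_target(...), where it gates the add_padding_x(...) calls (see the fixture changes below). A comment-only sketch of that call chain:

    // Sketch of how the new "Padding" flag flows through the test infrastructure
    // (names taken from the fixture diff further down):
    //
    //   FIXTURE_DATA_TEST_CASE(..., combine(..., make("Inplace", {false}), make("Padding", {true})))
    //     -> ScatterValidationFixture::setup(..., bool zero_init, bool inplace, bool padding)
    //        -> ScatterGenericValidationFixture::setup(..., inplace, padding, ...)
    //           -> compute_target(..., inplace, padding, ...)
    //              -> if(padding) { add_padding_x({ &src, &updates, &indices }); ... }
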
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index 8da5a0d953..94d0866c38 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -145,7 +145,7 @@ DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL,
cpu_isa.fp16 = (data_type == DataType::F16);
const auto *selected_impl = CpuSoftmaxKernel::get_implementation(
- SoftmaxKernelDataTypeISASelectorData{ data_type, cpu_isa, false /* is_log */, 0 /* axis */},
+ SoftmaxKernelDataTypeISASelectorData{ data_type, cpu_isa, false /* is_log */, 0 /* axis */, CPUInfo::get().get_sme2_vector_length()},
cpu::KernelSelectionType::Preferred);
ARM_COMPUTE_ERROR_ON_NULLPTR(selected_impl);
diff --git a/tests/validation/NEON/UNIT/RuntimeContext.cpp b/tests/validation/NEON/UNIT/RuntimeContext.cpp
index 819811943d..e126aded28 100644
--- a/tests/validation/NEON/UNIT/RuntimeContext.cpp
+++ b/tests/validation/NEON/UNIT/RuntimeContext.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,6 +48,24 @@ namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(UNIT)
+#if defined(ARM_COMPUTE_OPENMP_SCHEDULER) && !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)
+TEST_CASE(CpuCapacity, framework::DatasetMode::ALL)
+{
+ CPUInfo& ci = arm_compute::Scheduler::get().cpu_info();
+ const uint32_t total_num_cpus = ci.get_cpu_num();
+ const uint32_t nonlittle_num_cpus = ci.get_cpu_num_excluding_little();
+ const bool has_lmb = ci.cpu_has_little_mid_big();
+ const uint32_t num_threads = arm_compute::Scheduler::get().num_threads();
+
+ if(has_lmb){
+ ARM_COMPUTE_EXPECT(total_num_cpus != nonlittle_num_cpus, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(num_threads == nonlittle_num_cpus, framework::LogLevel::ERRORS);
+ }
+}
+#endif /* defined(ARM_COMPUTE_OPENMP_SCHEDULER) && !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)*/
+
TEST_SUITE(RuntimeContext)
TEST_CASE(Scheduler, framework::DatasetMode::ALL)
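
The new CpuCapacity case encodes the expectation that, when the OpenMP scheduler is used on an Android system with a little/mid/big core layout, the scheduler sizes its thread pool to the non-little cores rather than to all cores. A standalone sketch of the same invariant (the struct and helper below are illustrative stand-ins, not library API):

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-ins for CPUInfo::get_cpu_num(), get_cpu_num_excluding_little()
    // and Scheduler::num_threads(); the real test queries the library directly.
    struct CpuTopology
    {
        uint32_t total_cpus;
        uint32_t non_little_cpus;
        bool     has_little_mid_big;
    };

    void check_capacity(const CpuTopology &topo, uint32_t scheduler_threads)
    {
        if (topo.has_little_mid_big)
        {
            // A little/mid/big system must have at least one little core excluded...
            assert(topo.total_cpus != topo.non_little_cpus);
            // ...and the OpenMP scheduler is expected to use exactly the non-little cores.
            assert(scheduler_threads == topo.non_little_cpus);
        }
    }
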
diff --git a/tests/validation/fixtures/ScatterLayerFixture.h b/tests/validation/fixtures/ScatterLayerFixture.h
index 35e6b647f3..af161ef98b 100644
--- a/tests/validation/fixtures/ScatterLayerFixture.h
+++ b/tests/validation/fixtures/ScatterLayerFixture.h
@@ -48,7 +48,7 @@ class ScatterGenericValidationFixture : public framework::Fixture
{
public:
void setup(TensorShape src_shape, TensorShape updates_shape, TensorShape indices_shape,
- TensorShape out_shape, DataType data_type, ScatterInfo scatter_info, bool inplace,
+ TensorShape out_shape, DataType data_type, ScatterInfo scatter_info, bool inplace, bool padding,
QuantizationInfo src_qinfo = QuantizationInfo(), QuantizationInfo o_qinfo = QuantizationInfo())
{
// this is for improving randomness across tests
@@ -57,7 +57,7 @@ public:
+ updates_shape[4] + updates_shape[5]
+ indices_shape[0] + indices_shape[1] + indices_shape[2] + indices_shape[3];
- _target = compute_target(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, inplace, src_qinfo, o_qinfo);
+ _target = compute_target(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, inplace, padding, src_qinfo, o_qinfo);
_reference = compute_reference(src_shape, updates_shape, indices_shape, out_shape, data_type,scatter_info, src_qinfo , o_qinfo);
}
@@ -103,12 +103,12 @@ protected:
void fill_indices(U &&tensor, int i, const TensorShape &shape)
{
// Calculate max indices the shape should contain. Add an arbitrary value to allow testing for some out of bounds values (In this case min dimension)
- const int32_t max = std::max({shape[0] , shape[1], shape[2]});
- library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(-2), static_cast<int32_t>(max));
+ const int32_t max = std::min({shape[0] , shape[1], shape[2]}) + 1;
+ library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(0), static_cast<int32_t>(max));
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- const TensorShape &out_shape, DataType data_type, const ScatterInfo info, bool inplace,
+ const TensorShape &out_shape, DataType data_type, const ScatterInfo info, bool inplace, bool padding,
QuantizationInfo a_qinfo, QuantizationInfo o_qinfo)
{
// 1. Create relevant tensors using ScatterInfo data structure.
@@ -146,11 +146,14 @@ protected:
ARM_COMPUTE_ASSERT(indices.info()->is_resizable());
ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
- add_padding_x({ &src, &updates, &indices});
-
- if(!inplace)
+ if(padding)
{
- add_padding_x({ &dst });
+ add_padding_x({ &src, &updates, &indices});
+
+ if(!inplace)
+ {
+ add_padding_x({ &dst });
+ }
}
// Allocate tensors
@@ -197,12 +200,13 @@ protected:
TensorShape src_shape = a_shape;
TensorShape updates_shape = b_shape;
TensorShape indices_shape = c_shape;
+ const int num_ind_dims = c_shape.num_dimensions();
// 1. Collapse batch index into a single dim if necessary for update tensor and indices tensor.
- if(c_shape.num_dimensions() >= 3)
+ if(num_ind_dims >= 3)
{
indices_shape = indices_shape.collapsed_from(1);
- updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - 2); // Collapses from last 2 dims
+ updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - (num_ind_dims -1)); // Collapses batch dims
}
// 2. Collapse data dims into a single dim.
@@ -212,16 +216,16 @@ protected:
updates_shape.collapse(updates_shape.num_dimensions() - 1); // Collapse data dims (all except last dim which is batch dim)
// Create reference tensors
- SimpleTensor<T> src{ a_shape, data_type, 1, a_qinfo };
- SimpleTensor<T> updates{b_shape, data_type, 1, QuantizationInfo() };
- SimpleTensor<int32_t> indices{ c_shape, DataType::S32, 1, QuantizationInfo() };
+ SimpleTensor<T> src{ src_shape, data_type, 1, a_qinfo };
+ SimpleTensor<T> updates{updates_shape, data_type, 1, QuantizationInfo() };
+ SimpleTensor<int32_t> indices{ indices_shape, DataType::S32, 1, QuantizationInfo() };
// Fill reference
fill(src, 0 + _hash);
fill(updates, 1 + _hash);
fill_indices(indices, 2 + _hash, out_shape);
- // Calculate individual reference.
+ // Calculate individual reference using collapsed shapes
return reference::scatter_layer<T>(src, updates, indices, out_shape, info);
}
@@ -236,10 +240,10 @@ class ScatterValidationFixture : public ScatterGenericValidationFixture<TensorTy
{
public:
void setup(TensorShape src_shape, TensorShape update_shape, TensorShape indices_shape,
- TensorShape out_shape, DataType data_type, ScatterFunction func, bool zero_init, bool inplace)
+ TensorShape out_shape, DataType data_type, ScatterFunction func, bool zero_init, bool inplace, bool padding)
{
ScatterGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, update_shape,
- indices_shape, out_shape, data_type, ScatterInfo(func, zero_init), inplace,
+ indices_shape, out_shape, data_type, ScatterInfo(func, zero_init), inplace, padding,
QuantizationInfo(), QuantizationInfo());
}
};
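
Two behavioural points in the fixture are worth spelling out. fill_indices now draws non-negative values bounded by the smallest of the first three output dimensions plus one, so indices stay mostly valid while an occasional boundary value still exercises the out-of-bounds handling. And the reference-side collapse now folds all k = num_ind_dims - 1 batch dimensions instead of always the last two. A worked example of the collapse for the k = 3 dataset entry (assuming collapsed_from(axis) folds every dimension from axis onwards into one):

    // Worked example for the batch-dim collapse, k = 3 config:
    //   src     = (5, 5, 4, 2, 2)
    //   updates = (5, 5, 3, 6, 2), indices = (3, 3, 6, 2)  =>  num_ind_dims = 4
    //
    // Since num_ind_dims >= 3:
    //   indices_shape.collapsed_from(1)           -> (3, 3*6*2)    = (3, 36)
    //   updates_shape.collapsed_from(5 - (4 - 1)) -> (5, 5, 3*6*2) = (5, 5, 36)
    //
    // The previous code always collapsed the last two update dimensions, which only
    // handled two batch dimensions; the new expression handles any k.
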
diff --git a/tests/validation/reference/ScatterLayer.cpp b/tests/validation/reference/ScatterLayer.cpp
index c9e6035e14..55c48a9002 100644
--- a/tests/validation/reference/ScatterLayer.cpp
+++ b/tests/validation/reference/ScatterLayer.cpp
@@ -63,6 +63,7 @@ T reduce_op(const T &current,const T &update,const ScatterFunction func)
}
template float reduce_op(const float &current,const float &update,const ScatterFunction func);
+template half reduce_op(const half &current,const half &update,const ScatterFunction func);
}
// NOTE: This function expects collapsed tensors as input.
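
The added instantiation lets the FP16 validation path link against the reference reduce_op already used for float. For context, a hedged sketch of what such a per-element reduce_op looks like (the set of ScatterFunction cases is assumed from the public enum, not quoted from this file; ScatterFunction and ARM_COMPUTE_ERROR come from headers the reference file already includes):

    #include <algorithm>

    // Sketch only: combine the current output element with an incoming update
    // according to the scatter function.
    template <typename T>
    T reduce_op_sketch(const T &current, const T &update, const ScatterFunction func)
    {
        switch (func)
        {
            case ScatterFunction::Update:
                return update;
            case ScatterFunction::Add:
                return current + update;
            case ScatterFunction::Sub:
                return current - update;
            case ScatterFunction::Max:
                return std::max(current, update);
            case ScatterFunction::Min:
                return std::min(current, update);
            default:
                ARM_COMPUTE_ERROR("Unsupported ScatterFunction");
        }
    }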