author     Michele Di Giorgio <michele.digiorgio@arm.com>  2021-04-30 18:30:41 +0100
committer  Michele Di Giorgio <michele.digiorgio@arm.com>  2021-05-04 14:00:17 +0000
commit     4fc10b3ae968bcdc8c1aaab358e93f2e1ba328dc (patch)
tree       4a60bf21291422c4626aea42e33182cf46a5fce8 /tests/validation/NEON/UNIT
parent     6a5eee7f267290a4894639aa349c8d82c231812a (diff)
download   ComputeLibrary-4fc10b3ae968bcdc8c1aaab358e93f2e1ba328dc.tar.gz
Turn EXPECT into ASSERT when testing invalid conditions
Relates to COMPMID-4385

Change-Id: Ibc1d67f766b7c1a399dbeacf26a4b9d9f7323785
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5549
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
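Why the change matters: ARM_COMPUTE_EXPECT records a failed check and lets the test keep running, while ARM_COMPUTE_ASSERT stops the test case at the failing check. When a check guards an invalid condition that later statements depend on (for example, that importing a null pointer failed), running past the failure can crash the test run or bury the root cause under secondary failures. Below is a minimal, self-contained sketch of the two behaviours using simplified stand-in macros; these are illustrative assumptions, not the framework's real definitions.

#include <cstdio>
#include <stdexcept>

// Stand-in macros (assumed semantics): EXPECT logs the failure and
// continues, ASSERT throws so the test case stops at the failing check.
#define SKETCH_EXPECT(cond) \
    do { if(!(cond)) { std::printf("EXPECT failed: %s\n", #cond); } } while(false)
#define SKETCH_ASSERT(cond) \
    do { if(!(cond)) { throw std::runtime_error(#cond); } } while(false)

int main()
{
    int *buf = nullptr; // stands in for an allocation that failed

    SKETCH_EXPECT(buf != nullptr); // logged, but execution continues...
    // ...so any follow-up use of buf would dereference a null pointer.

    try
    {
        SKETCH_ASSERT(buf != nullptr); // throws: the test stops safely here
    }
    catch(const std::runtime_error &e)
    {
        std::printf("ASSERT stopped the test: %s\n", e.what());
    }
    return 0;
}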
Diffstat (limited to 'tests/validation/NEON/UNIT')
-rw-r--r--  tests/validation/NEON/UNIT/MemoryManager.cpp   | 10
-rw-r--r--  tests/validation/NEON/UNIT/RuntimeContext.cpp  | 24
-rw-r--r--  tests/validation/NEON/UNIT/TensorAllocator.cpp | 46
3 files changed, 40 insertions(+), 40 deletions(-)
diff --git a/tests/validation/NEON/UNIT/MemoryManager.cpp b/tests/validation/NEON/UNIT/MemoryManager.cpp
index 83a9fcb332..2c57b534fe 100644
--- a/tests/validation/NEON/UNIT/MemoryManager.cpp
+++ b/tests/validation/NEON/UNIT/MemoryManager.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,15 +62,15 @@ TEST_CASE(BlobMemoryManagerSimpleWithinFunctionLevel, framework::DatasetMode::AL
norm_layer_1.configure(&src, &dst, NormalizationLayerInfo(NormType::CROSS_MAP, 3));
norm_layer_2.configure(&src, &dst, NormalizationLayerInfo(NormType::IN_MAP_1D, 3));
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Finalize memory manager
mm->populate(allocator, 1 /* num_pools */);
diff --git a/tests/validation/NEON/UNIT/RuntimeContext.cpp b/tests/validation/NEON/UNIT/RuntimeContext.cpp
index f64d380423..819811943d 100644
--- a/tests/validation/NEON/UNIT/RuntimeContext.cpp
+++ b/tests/validation/NEON/UNIT/RuntimeContext.cpp
@@ -57,14 +57,14 @@ TEST_CASE(Scheduler, framework::DatasetMode::ALL)
RuntimeContext ctx;
// Check if it's been initialised properly
- ARM_COMPUTE_EXPECT(ctx.scheduler() != nullptr, framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(ctx.asset_manager() == nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(ctx.scheduler() != nullptr);
+ ARM_COMPUTE_ASSERT(ctx.asset_manager() == nullptr);
// Create a Scheduler
auto scheduler = SchedulerFactory::create();
ctx.set_scheduler(scheduler.get());
// Check if the scheduler has been properly setup
- ARM_COMPUTE_EXPECT(ctx.scheduler() != nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(ctx.scheduler() != nullptr);
// Create a new activation function
NEActivationLayer act_layer(&ctx);
@@ -74,14 +74,14 @@ TEST_CASE(Scheduler, framework::DatasetMode::ALL)
act_layer.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR));
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
float min_bound = 0;
float max_bound = 0;
@@ -117,10 +117,10 @@ TEST_CASE(MultipleThreadedScheduller, framework::DatasetMode::ALL)
act_layer_thread0.configure(&src_t0, &dst_t0, activation_info);
act_layer_thread1.configure(&src_t1, &dst_t1, activation_info);
- ARM_COMPUTE_EXPECT(src_t0.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_t0.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src_t1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_t1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src_t0.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst_t0.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(src_t1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst_t1.info()->is_resizable());
// Allocate tensors
src_t0.allocator()->allocate();
@@ -128,8 +128,8 @@ TEST_CASE(MultipleThreadedScheduller, framework::DatasetMode::ALL)
src_t1.allocator()->allocate();
dst_t1.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src_t0.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src_t1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src_t0.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!src_t1.info()->is_resizable());
float min_bound = 0;
float max_bound = 0;
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index ef19524d1c..d84bcd4a20 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,32 +61,32 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
// Negative case : Import nullptr
Tensor t1;
t1.allocator()->init(info);
- ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(nullptr)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bool(t1.allocator()->import_memory(nullptr)));
+ ARM_COMPUTE_ASSERT(t1.info()->is_resizable());
// Negative case : Import misaligned pointer
Tensor t2;
const size_t required_alignment = 339;
t2.allocator()->init(info, required_alignment);
- ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bool(t2.allocator()->import_memory(data.get())));
+ ARM_COMPUTE_ASSERT(t2.info()->is_resizable());
// Negative case : Import memory to a tensor that is memory managed
Tensor t3;
MemoryGroup mg;
t3.allocator()->set_associated_memory_group(&mg);
- ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bool(t3.allocator()->import_memory(data.get())));
+ ARM_COMPUTE_ASSERT(t3.info()->is_resizable());
// Positive case : Set raw pointer
Tensor t4;
t4.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(t4.allocator()->import_memory(data.get())));
+ ARM_COMPUTE_ASSERT(!t4.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()));
t4.allocator()->free();
- ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t4.buffer() == nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(t4.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t4.buffer() == nullptr);
}
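The !bool(...) wrapping in the checks above reflects that import_memory returns a status object rather than a plain bool, with a true conversion meaning success; the negative cases therefore assert that the call reports failure while the tensor stays resizable. A minimal sketch of the idiom with a hypothetical Status type (an assumption for illustration, not the library's own class):

#include <cstdio>

// Hypothetical minimal status type: converts to bool, true on success.
class Status
{
public:
    explicit Status(bool ok) : _ok(ok) {}
    explicit operator bool() const { return _ok; }
private:
    bool _ok;
};

// Hypothetical import function mirroring the negative case above:
// importing a null pointer must fail.
Status import_memory(void *ptr)
{
    return Status(ptr != nullptr);
}

int main()
{
    // The tests write ARM_COMPUTE_ASSERT(!bool(import_memory(nullptr))):
    // the call is expected to fail, and with ASSERT the test stops
    // immediately if it unexpectedly succeeds.
    if(!bool(import_memory(nullptr)))
    {
        std::printf("import of nullptr rejected, as asserted\n");
    }
    return 0;
}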
TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
@@ -114,8 +114,8 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
void *aligned_ptr = raw_data.get();
std::align(required_alignment, total_size_in_bytes, aligned_ptr, space);
- ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(aligned_ptr)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(aligned_ptr)));
+ ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());
// Fill tensor
std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -137,7 +137,7 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
// Release resources
tensor.allocator()->free();
- ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
@@ -160,8 +160,8 @@ TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
const size_t total_size_in_bytes = tensor.info()->total_size();
auto raw_data = std::make_unique<uint8_t[]>(total_size_in_bytes);
- ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(raw_data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(raw_data.get())));
+ ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());
// Fill tensor while accounting padding
std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -190,7 +190,7 @@ TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
// Release resources
tensor.allocator()->free();
- ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
#if !defined(BARE_METAL)
@@ -221,12 +221,12 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
// Map file
utils::mmap_io::MMappedFile mmapped_file("test_mmap_import.bin", 0 /** Whole file */, 0);
- ARM_COMPUTE_EXPECT(mmapped_file.is_mapped(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mmapped_file.is_mapped());
unsigned char *data = mmapped_file.data();
// Import memory mapped memory
- ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(data)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(data)));
+ ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());
// Fill tensor
std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -248,7 +248,7 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
// Release resources
tensor.allocator()->free();
- ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
#endif // !defined(BARE_METAL)
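For context on the memory-mapped path: the test maps a file with the library's MMappedFile helper and imports the mapping's base pointer into the tensor. A rough POSIX sketch of the mapping step itself, independent of that wrapper (file name reused from the test; error handling kept minimal):

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    // Open and map the whole file read/write, roughly what a
    // memory-mapped-file wrapper does under the hood.
    int fd = open("test_mmap_import.bin", O_RDWR);
    if(fd < 0) { std::perror("open"); return 1; }

    struct stat sb;
    if(fstat(fd, &sb) != 0) { std::perror("fstat"); close(fd); return 1; }

    void *data = mmap(nullptr, static_cast<size_t>(sb.st_size),
                      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if(data == MAP_FAILED) { std::perror("mmap"); close(fd); return 1; }

    // The test would pass static_cast<unsigned char *>(data) to
    // tensor.allocator()->import_memory(...) at this point.

    munmap(data, static_cast<size_t>(sb.st_size));
    close(fd);
    return 0;
}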
@@ -262,7 +262,7 @@ TEST_CASE(AlignedAlloc, framework::DatasetMode::ALL)
t.allocator()->init(info, requested_alignment);
t.allocator()->allocate();
- ARM_COMPUTE_EXPECT(t.buffer() != nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(t.buffer() != nullptr);
ARM_COMPUTE_EXPECT(t.allocator()->alignment() == requested_alignment, framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(arm_compute::utility::check_aligned(reinterpret_cast<void *>(t.buffer()), requested_alignment),
framework::LogLevel::ERRORS);