about summary refs log tree commit diff
path: root/tests/validation/NEON/UNIT/TensorAllocator.cpp
diff options
context:
space:
mode:
author    Michele Di Giorgio <michele.digiorgio@arm.com>  2021-04-30 18:30:41 +0100
committer Michele Di Giorgio <michele.digiorgio@arm.com>  2021-05-04 14:00:17 +0000
commit   4fc10b3ae968bcdc8c1aaab358e93f2e1ba328dc (patch)
tree     4a60bf21291422c4626aea42e33182cf46a5fce8 /tests/validation/NEON/UNIT/TensorAllocator.cpp
parent   6a5eee7f267290a4894639aa349c8d82c231812a (diff)
download ComputeLibrary-4fc10b3ae968bcdc8c1aaab358e93f2e1ba328dc.tar.gz
Turn EXPECT into ASSERT when testing invalid conditions
Relates to COMPMID-4385

Change-Id: Ibc1d67f766b7c1a399dbeacf26a4b9d9f7323785
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5549
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/NEON/UNIT/TensorAllocator.cpp')
-rw-r--r--  tests/validation/NEON/UNIT/TensorAllocator.cpp | 46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index ef19524d1c..d84bcd4a20 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,32 +61,32 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
// Negative case : Import nullptr
Tensor t1;
t1.allocator()->init(info);
- ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(nullptr)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bool(t1.allocator()->import_memory(nullptr)));
+ ARM_COMPUTE_ASSERT(t1.info()->is_resizable());
// Negative case : Import misaligned pointer
Tensor t2;
const size_t required_alignment = 339;
t2.allocator()->init(info, required_alignment);
- ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bool(t2.allocator()->import_memory(data.get())));
+ ARM_COMPUTE_ASSERT(t2.info()->is_resizable());
// Negative case : Import memory to a tensor that is memory managed
Tensor t3;
MemoryGroup mg;
t3.allocator()->set_associated_memory_group(&mg);
- ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bool(t3.allocator()->import_memory(data.get())));
+ ARM_COMPUTE_ASSERT(t3.info()->is_resizable());
// Positive case : Set raw pointer
Tensor t4;
t4.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(t4.allocator()->import_memory(data.get())));
+ ARM_COMPUTE_ASSERT(!t4.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()));
t4.allocator()->free();
- ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t4.buffer() == nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(t4.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t4.buffer() == nullptr);
}
TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
@@ -114,8 +114,8 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
void *aligned_ptr = raw_data.get();
std::align(required_alignment, total_size_in_bytes, aligned_ptr, space);
- ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(aligned_ptr)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(aligned_ptr)));
+ ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());
// Fill tensor
std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -137,7 +137,7 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
// Release resources
tensor.allocator()->free();
- ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
@@ -160,8 +160,8 @@ TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
const size_t total_size_in_bytes = tensor.info()->total_size();
auto raw_data = std::make_unique<uint8_t[]>(total_size_in_bytes);
- ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(raw_data.get())), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(raw_data.get())));
+ ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());
// Fill tensor while accounting padding
std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -190,7 +190,7 @@ TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
// Release resources
tensor.allocator()->free();
- ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
#if !defined(BARE_METAL)
@@ -221,12 +221,12 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
// Map file
utils::mmap_io::MMappedFile mmapped_file("test_mmap_import.bin", 0 /** Whole file */, 0);
- ARM_COMPUTE_EXPECT(mmapped_file.is_mapped(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mmapped_file.is_mapped());
unsigned char *data = mmapped_file.data();
// Import memory mapped memory
- ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(data)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(data)));
+ ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());
// Fill tensor
std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -248,7 +248,7 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
// Release resources
tensor.allocator()->free();
- ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
#endif // !defined(BARE_METAL)
@@ -262,7 +262,7 @@ TEST_CASE(AlignedAlloc, framework::DatasetMode::ALL)
t.allocator()->init(info, requested_alignment);
t.allocator()->allocate();
- ARM_COMPUTE_EXPECT(t.buffer() != nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(t.buffer() != nullptr);
ARM_COMPUTE_EXPECT(t.allocator()->alignment() == requested_alignment, framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(arm_compute::utility::check_aligned(reinterpret_cast<void *>(t.buffer()), requested_alignment),
framework::LogLevel::ERRORS);