Diffstat (limited to 'tests/validation/NEON/UNIT')
-rw-r--r--  tests/validation/NEON/UNIT/MemoryManager.cpp   | 10
-rw-r--r--  tests/validation/NEON/UNIT/RuntimeContext.cpp  | 24
-rw-r--r--  tests/validation/NEON/UNIT/TensorAllocator.cpp | 46
3 files changed, 40 insertions, 40 deletions
diff --git a/tests/validation/NEON/UNIT/MemoryManager.cpp b/tests/validation/NEON/UNIT/MemoryManager.cpp
index 83a9fcb332..2c57b534fe 100644
--- a/tests/validation/NEON/UNIT/MemoryManager.cpp
+++ b/tests/validation/NEON/UNIT/MemoryManager.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -62,15 +62,15 @@ TEST_CASE(BlobMemoryManagerSimpleWithinFunctionLevel, framework::DatasetMode::AL
     norm_layer_1.configure(&src, &dst, NormalizationLayerInfo(NormType::CROSS_MAP, 3));
     norm_layer_2.configure(&src, &dst, NormalizationLayerInfo(NormType::IN_MAP_1D, 3));

-    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

     // Allocate tensors
     src.allocator()->allocate();
     dst.allocator()->allocate();

-    ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

     // Finalize memory manager
     mm->populate(allocator, 1 /* num_pools */);
diff --git a/tests/validation/NEON/UNIT/RuntimeContext.cpp b/tests/validation/NEON/UNIT/RuntimeContext.cpp
index f64d380423..819811943d 100644
--- a/tests/validation/NEON/UNIT/RuntimeContext.cpp
+++ b/tests/validation/NEON/UNIT/RuntimeContext.cpp
@@ -57,14 +57,14 @@ TEST_CASE(Scheduler, framework::DatasetMode::ALL)
     RuntimeContext ctx;

     // Check if it's been initialised properly
-    ARM_COMPUTE_EXPECT(ctx.scheduler() != nullptr, framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(ctx.asset_manager() == nullptr, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(ctx.scheduler() != nullptr);
+    ARM_COMPUTE_ASSERT(ctx.asset_manager() == nullptr);

     // Create a Scheduler
     auto scheduler = SchedulerFactory::create();
     ctx.set_scheduler(scheduler.get());
     // Check if the scheduler has been properly setup
-    ARM_COMPUTE_EXPECT(ctx.scheduler() != nullptr, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(ctx.scheduler() != nullptr);

     // Create a new activation function
     NEActivationLayer act_layer(&ctx);
@@ -74,14 +74,14 @@ TEST_CASE(Scheduler, framework::DatasetMode::ALL)
     act_layer.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR));

-    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

     // Allocate tensors
     src.allocator()->allocate();
     dst.allocator()->allocate();

-    ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(!src.info()->is_resizable());

     float min_bound = 0;
     float max_bound = 0;
@@ -117,10 +117,10 @@ TEST_CASE(MultipleThreadedScheduller, framework::DatasetMode::ALL)
     act_layer_thread0.configure(&src_t0, &dst_t0, activation_info);
     act_layer_thread1.configure(&src_t1, &dst_t1, activation_info);

-    ARM_COMPUTE_EXPECT(src_t0.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst_t0.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(src_t1.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst_t1.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(src_t0.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(dst_t0.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(src_t1.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(dst_t1.info()->is_resizable());

     // Allocate tensors
     src_t0.allocator()->allocate();
@@ -128,8 +128,8 @@ TEST_CASE(MultipleThreadedScheduller, framework::DatasetMode::ALL)
     src_t1.allocator()->allocate();
     dst_t1.allocator()->allocate();

-    ARM_COMPUTE_EXPECT(!src_t0.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(!src_t1.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(!src_t0.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(!src_t1.info()->is_resizable());

     float min_bound = 0;
     float max_bound = 0;
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index ef19524d1c..d84bcd4a20 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -61,32 +61,32 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
     // Negative case : Import nullptr
     Tensor t1;
     t1.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(nullptr)), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(!bool(t1.allocator()->import_memory(nullptr)));
+    ARM_COMPUTE_ASSERT(t1.info()->is_resizable());

     // Negative case : Import misaligned pointer
     Tensor t2;
     const size_t required_alignment = 339;
     t2.allocator()->init(info, required_alignment);
-    ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(!bool(t2.allocator()->import_memory(data.get())));
+    ARM_COMPUTE_ASSERT(t2.info()->is_resizable());

     // Negative case : Import memory to a tensor that is memory managed
     Tensor t3;
     MemoryGroup mg;
     t3.allocator()->set_associated_memory_group(&mg);
-    ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(!bool(t3.allocator()->import_memory(data.get())));
+    ARM_COMPUTE_ASSERT(t3.info()->is_resizable());

     // Positive case : Set raw pointer
     Tensor t4;
     t4.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(bool(t4.allocator()->import_memory(data.get())));
+    ARM_COMPUTE_ASSERT(!t4.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()));
     t4.allocator()->free();
-    ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t4.buffer() == nullptr, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(t4.info()->is_resizable());
+    ARM_COMPUTE_ASSERT(t4.buffer() == nullptr);
 }

 TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
@@ -114,8 +114,8 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
     void *aligned_ptr = raw_data.get();
     std::align(required_alignment, total_size_in_bytes, aligned_ptr, space);

-    ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(aligned_ptr)), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(aligned_ptr)));
+    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

     // Fill tensor
     std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -137,7 +137,7 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)

     // Release resources
     tensor.allocator()->free();
-    ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
 }

 TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
@@ -160,8 +160,8 @@ TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
     const size_t total_size_in_bytes = tensor.info()->total_size();
     auto raw_data = std::make_unique<uint8_t[]>(total_size_in_bytes);

-    ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(raw_data.get())), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(raw_data.get())));
+    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

     // Fill tensor while accounting padding
     std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -190,7 +190,7 @@ TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)

     // Release resources
     tensor.allocator()->free();
-    ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
 }

 #if !defined(BARE_METAL)
@@ -221,12 +221,12 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)

     // Map file
     utils::mmap_io::MMappedFile mmapped_file("test_mmap_import.bin", 0 /** Whole file */, 0);
-    ARM_COMPUTE_EXPECT(mmapped_file.is_mapped(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(mmapped_file.is_mapped());

     unsigned char *data = mmapped_file.data();

     // Import memory mapped memory
-    ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(data)), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(data)));
+    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

     // Fill tensor
     std::uniform_real_distribution<float> distribution(-5.f, 5.f);
@@ -248,7 +248,7 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)

     // Release resources
     tensor.allocator()->free();
-    ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
 }
 #endif // !defined(BARE_METAL)

@@ -262,7 +262,7 @@ TEST_CASE(AlignedAlloc, framework::DatasetMode::ALL)
     t.allocator()->init(info, requested_alignment);
     t.allocator()->allocate();

-    ARM_COMPUTE_EXPECT(t.buffer() != nullptr, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_ASSERT(t.buffer() != nullptr);
     ARM_COMPUTE_EXPECT(t.allocator()->alignment() == requested_alignment, framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(arm_compute::utility::check_aligned(reinterpret_cast<void *>(t.buffer()), requested_alignment), framework::LogLevel::ERRORS);
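Taken together, the three files make the same mechanical substitution: checks that previously passed a condition plus an explicit log level to ARM_COMPUTE_EXPECT now pass the condition alone to ARM_COMPUTE_ASSERT. A minimal before/after sketch of the call pattern, lifted from the hunks above (the surrounding TEST_CASE fixtures and tensor setup are omitted):

    // Old form: condition plus an explicit reporting level.
    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);

    // New form: condition only.
    ARM_COMPUTE_ASSERT(src.info()->is_resizable());

Note that the remaining checks at the end of AlignedAlloc keep the two-argument ARM_COMPUTE_EXPECT form in this patch.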