From 60ab9765329b1449b509b32b07f0b0abb3b532f2 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Thu, 13 Jan 2022 09:34:44 +0000
Subject: IVGCVSW-6673 Implement CanBeImported function to ClTensorHandle

* Added Unittests

Signed-off-by: Nikhil Raj
Signed-off-by: David Monahan
Change-Id: If7c0add39583a7e47b43fd79f93c620f86f80fc1
---
 src/backends/cl/test/ClImportTensorHandleTests.cpp | 57 ++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 0403d5379e..3d702642aa 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -217,4 +217,61 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
     }
 }
 
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClCanBeImported")
+{
+    ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+                                              static_cast<MemorySourceFlags>(MemorySource::Malloc));
+
+    TensorInfo info({ 1, 24, 16, 3 }, DataType::Float32);
+
+    // create TensorHandle for memory import
+    auto handle = handleFactory.CreateTensorHandle(info);
+
+    // Get CLtensor
+    arm_compute::CLTensor& tensor = PolymorphicDowncast<ClImportTensorHandle*>(handle.get())->GetTensor();
+
+    // Allocate user memory
+    const size_t totalBytes = tensor.info()->total_size();
+    const size_t alignment =
+            arm_compute::CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
+    size_t space = totalBytes + alignment + alignment;
+    auto testData = std::make_unique<uint8_t[]>(space);
+    void* alignedPtr = testData.get();
+    CHECK(std::align(alignment, totalBytes, alignedPtr, space));
+
+    // Check that CanBeImported throws for an Undefined MemorySource
+    CHECK_THROWS_AS(handle->CanBeImported(alignedPtr, armnn::MemorySource::Undefined), MemoryImportException);
+
+}
+
+TEST_CASE("ClCanBeImportedAlignedMemory")
+{
+    ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+                                              static_cast<MemorySourceFlags>(MemorySource::Malloc));
+
+    TensorInfo info({ 1, 1, 1, 1 }, DataType::Float32);
+
+    // create TensorHandle (Memory Managed status is irrelevant)
+    auto handle = handleFactory.CreateTensorHandle(info);
+    // Get CLtensor
+    arm_compute::CLTensor& tensor = PolymorphicDowncast<ClImportTensorHandle*>(handle.get())->GetTensor();
+
+    // Create an aligned buffer
+    const size_t totalBytes = tensor.info()->total_size();
+    const size_t alignment =
+            arm_compute::CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
+    size_t space = totalBytes + alignment + alignment;
+    auto testData = std::make_unique<uint8_t[]>(space);
+    void* alignedPtr = testData.get();
+    CHECK(std::align(alignment, totalBytes, alignedPtr, space));
+
+    // Check aligned buffers return true
+    CHECK(handle->CanBeImported(alignedPtr, MemorySource::Malloc) == true);
+
+    // Due to the nature of how GPU memory is mapped it is entirely possible for memory which is misaligned on the CPU
+    // to be successfully imported on the GPU. As such there is no way to create a misaligned pointer that will always
+    // fail. Rather, it will succeed on some devices and fail on others. As long as a correctly aligned buffer returns
+    // true we can be confident that it will be successfully imported. All other cases need to be handled by the user.
+}
+
 }
--
cgit v1.2.1
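
For context, a minimal sketch of how a caller might use the check added by this patch alongside the existing Import path. This is illustrative only and not part of the change: the helper name TryZeroCopyImport and the header locations are assumptions, and only CanBeImported and Import from the armnn::ITensorHandle interface exercised in the tests above are relied on.

// Sketch: attempt a zero-copy import only when CanBeImported accepts the buffer.
// Assumes a handle created by ClImportTensorHandleFactory with MemorySource::Malloc flags
// and a user buffer aligned as in the tests above. Header paths are assumed and may
// differ between Arm NN releases.
#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/MemorySources.hpp>

bool TryZeroCopyImport(armnn::ITensorHandle& handle, void* userBuffer)
{
    using armnn::MemorySource;
    // CanBeImported may throw MemoryImportException for unsupported sources
    // (see the ClCanBeImported test above), so only a supported source is passed here.
    if (!handle.CanBeImported(userBuffer, MemorySource::Malloc))
    {
        return false; // caller keeps the handle's own allocation and copies instead
    }
    // Import returns true on success; the CL tensor then references the user allocation directly.
    return handle.Import(userBuffer, MemorySource::Malloc);
}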