From 4af561666b0ce5c12164447a5f7eb9722abb85f8 Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Tue, 20 Apr 2021 16:37:55 +0100
Subject: IVGCVSW-5816 Constant memory access

* Add new class ManagedConstTensorHandle to Unmap when out of scope
* Integrate into existing layers that have constants
* Add unit tests

Signed-off-by: Francis Murtagh
Change-Id: I0a05e14e438804b37e9862e76b5ca329483f6b45
---
 src/backends/backendsCommon/CpuTensorHandle.hpp    | 67 ++++++++++++++++++++++
 .../test/DefaultAsyncExecuteTest.cpp               |  1 -
 2 files changed, 67 insertions(+), 1 deletion(-)

(limited to 'src/backends/backendsCommon')

diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index a300fe09c2..fdd2439b41 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -175,4 +175,71 @@ const void* ConstCpuTensorHandle::GetConstTensor<void>() const;
 template <>
 void* CpuTensorHandle::GetTensor<void>() const;
 
+class ManagedConstTensorHandle
+{
+
+public:
+    explicit ManagedConstTensorHandle(std::shared_ptr<ConstCpuTensorHandle> ptr)
+        : m_Mapped(false)
+        , m_TensorHandle(std::move(ptr)) {};
+
+    /// RAII Managed resource Unmaps MemoryArea once out of scope
+    const void* Map(bool blocking = true)
+    {
+        if (m_TensorHandle)
+        {
+            auto pRet = m_TensorHandle->Map(blocking);
+            m_Mapped = true;
+            return pRet;
+        }
+        else
+        {
+            throw armnn::Exception("Attempting to Map null TensorHandle");
+        }
+
+    }
+
+    // Delete copy constructor as it's unnecessary
+    ManagedConstTensorHandle(const ConstCpuTensorHandle& other) = delete;
+
+    // Delete copy assignment as it's unnecessary
+    ManagedConstTensorHandle& operator=(const ManagedConstTensorHandle& other) = delete;
+
+    // Delete move assignment as it's unnecessary
+    ManagedConstTensorHandle& operator=(ManagedConstTensorHandle&& other) noexcept = delete;
+
+    ~ManagedConstTensorHandle()
+    {
+        // Bias tensor handles need to be initialized empty before entering scope of if statement checking if enabled
+        if (m_TensorHandle)
+        {
+            Unmap();
+        }
+    }
+
+    void Unmap()
+    {
+        // Only unmap if mapped and TensorHandle exists.
+        if (m_Mapped && m_TensorHandle)
+        {
+            m_TensorHandle->Unmap();
+            m_Mapped = false;
+        }
+    }
+
+    const TensorInfo& GetTensorInfo() const
+    {
+        return m_TensorHandle->GetTensorInfo();
+    }
+
+    bool IsMapped() const
+    {
+        return m_Mapped;
+    }
+
+private:
+    bool m_Mapped;
+    std::shared_ptr<ConstCpuTensorHandle> m_TensorHandle;
+};
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index 0d4595210e..56a794e77c 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -243,7 +243,6 @@ BOOST_AUTO_TEST_CASE(TestDefaultAsyncExeuteWithThreads)
     ValidateTensor(workingMemDescriptor2.m_Inputs[0], expectedExecuteval2);
 }
 
-
 BOOST_AUTO_TEST_SUITE_END()
 
 }
\ No newline at end of file
-- 
cgit v1.2.1
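
Usage note (not part of the patch): the sketch below illustrates the RAII behaviour the commit message describes, where a constant tensor is mapped for the duration of a scope and unmapped automatically when the wrapper is destroyed. The function name, the way the ConstCpuTensorHandle is obtained, and the include paths are assumptions made for illustration; only Map(), GetTensorInfo(), Unmap() and the unmap-on-destruction behaviour come from the class added in CpuTensorHandle.hpp above.

// Illustrative sketch only, assuming the include paths below resolve inside
// the ArmNN source tree; this is not code from the commit.
#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <memory>

namespace armnn
{

// Hypothetical helper: a workload-style function that reads constant weights.
void ReadConstantWeights(std::shared_ptr<ConstCpuTensorHandle> weightHandle)
{
    // Wrap the shared handle; nothing is mapped yet.
    ManagedConstTensorHandle managedWeights(std::move(weightHandle));

    // Map() pins the constant memory and marks the wrapper as mapped.
    const void* weightData = managedWeights.Map();
    const TensorInfo& weightInfo = managedWeights.GetTensorInfo();

    // ... read weightData according to weightInfo ...
    IgnoreUnused(weightData, weightInfo);

    // No explicit Unmap() is required: the destructor unmaps the tensor
    // when managedWeights goes out of scope, including on early return
    // or when an exception propagates.
}

} // namespace armnn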