author    Matthew Bentham <matthew.bentham@arm.com>  2022-11-23 18:17:48 +0000
committer TeresaARM <teresa.charlinreyes@arm.com>  2022-12-15 12:24:27 +0000
commit    6b5f674aad30a3438c295c25b5d115007e80b757 (patch)
tree      3932603e408330c9f5d09b19a4b224e47e996dec /src/backends/reference/RefTensorHandle.cpp
parent    da6bf9e2eac374cd92147d3c60a8af8bd6bc5a37 (diff)
Change the semantics of RefTensorHandle::Import to 'overlay' existing memory
This makes it possible to call Import on an Allocated() or memory-managed tensor, which the current implementation of OptimizerOptions::m_ExportEnabled needs: the last layer before the OutputLayer must be able to Import the user's OutputTensor, but that only happens after other memory allocation has been done.

Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Change-Id: I1a885c2da7b1f0f3964ae53b8135b5e96a66614f
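For illustration, a minimal sketch of the call sequence this change enables (not part of the patch; the include path and the free function OverlayUserBuffer are illustrative assumptions):

    // Import() may now be called even after the handle has allocated or been
    // assigned memory; the imported pointer simply overlays that storage.
    #include <reference/RefTensorHandle.hpp>  // illustrative include path
    #include <cassert>

    void OverlayUserBuffer(armnn::RefTensorHandle& handle, void* userOutputBuffer)
    {
        handle.Allocate();  // the handle owns its own memory here

        // Overlay the caller's buffer. Before this patch, Import() on an
        // already-allocated handle returned false.
        if (handle.Import(userOutputBuffer, armnn::MemorySource::Malloc))
        {
            // GetPointer() now prefers m_ImportedMemory, so Map() resolves to
            // the imported buffer rather than the handle's own allocation.
            assert(handle.Map(true) == userOutputBuffer);
        }
    }

This is what lets the layer feeding the OutputLayer write directly into the user's OutputTensor even though memory planning has already run.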
Diffstat (limited to 'src/backends/reference/RefTensorHandle.cpp')
-rw-r--r--  src/backends/reference/RefTensorHandle.cpp  |  95
1 file changed, 28 insertions(+), 67 deletions(-)
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index eccdc26542..dbfa374945 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -12,8 +12,7 @@ RefTensorHandle::RefTensorHandle(const TensorInfo &tensorInfo, std::shared_ptr<R
m_MemoryManager(memoryManager),
m_Pool(nullptr),
m_UnmanagedMemory(nullptr),
- m_Imported(false),
- m_IsImportEnabled(false)
+ m_ImportedMemory(nullptr)
{
}
@@ -22,59 +21,46 @@ RefTensorHandle::RefTensorHandle(const TensorInfo& tensorInfo)
: m_TensorInfo(tensorInfo),
m_Pool(nullptr),
m_UnmanagedMemory(nullptr),
- m_Imported(false),
- m_IsImportEnabled(true)
+ m_ImportedMemory(nullptr)
{
}
RefTensorHandle::~RefTensorHandle()
{
- if (!m_Pool)
- {
- // unmanaged
- if (!m_Imported)
- {
- ::operator delete(m_UnmanagedMemory);
- }
- }
+ ::operator delete(m_UnmanagedMemory);
}
void RefTensorHandle::Manage()
{
- if (!m_IsImportEnabled)
- {
- ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
- ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+ ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
+ ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+ if (m_MemoryManager)
+ {
m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
}
}
void RefTensorHandle::Allocate()
{
- // If import is enabled, do not allocate the tensor
- if (!m_IsImportEnabled)
+ if (!m_UnmanagedMemory)
{
-
- if (!m_UnmanagedMemory)
+ if (!m_Pool)
{
- if (!m_Pool)
- {
- // unmanaged
- m_UnmanagedMemory = ::operator new(m_TensorInfo.GetNumBytes());
- }
- else
- {
- m_MemoryManager->Allocate(m_Pool);
- }
+ // unmanaged
+ m_UnmanagedMemory = ::operator new(m_TensorInfo.GetNumBytes());
}
else
{
- throw InvalidArgumentException("RefTensorHandle::Allocate Trying to allocate a RefTensorHandle"
- "that already has allocated memory.");
+ m_MemoryManager->Allocate(m_Pool);
}
}
+ else
+ {
+ throw InvalidArgumentException("RefTensorHandle::Allocate: Trying to allocate a RefTensorHandle "
+ "that already has allocated memory.");
+ }
}
const void* RefTensorHandle::Map(bool /*unused*/) const
@@ -84,7 +70,11 @@ const void* RefTensorHandle::Map(bool /*unused*/) const
void* RefTensorHandle::GetPointer() const
{
- if (m_UnmanagedMemory)
+ if (m_ImportedMemory)
+ {
+ return m_ImportedMemory;
+ }
+ else if (m_UnmanagedMemory)
{
return m_UnmanagedMemory;
}
@@ -114,51 +104,22 @@ void RefTensorHandle::CopyInFrom(const void* src)
MemorySourceFlags RefTensorHandle::GetImportFlags() const
{
- if (m_IsImportEnabled)
- {
- return static_cast<MemorySourceFlags>(MemorySource::Malloc);
- }
- else
- {
- return static_cast<MemorySourceFlags>(MemorySource::Undefined);
- }
+ return static_cast<MemorySourceFlags>(MemorySource::Malloc);
}
bool RefTensorHandle::Import(void* memory, MemorySource source)
{
- if (m_IsImportEnabled && source == MemorySource::Malloc)
+ if (source == MemorySource::Malloc)
{
// Check memory alignment
if(!CanBeImported(memory, source))
{
- if (m_Imported)
- {
- m_Imported = false;
- m_UnmanagedMemory = nullptr;
- }
+ m_ImportedMemory = nullptr;
return false;
}
- // m_UnmanagedMemory not yet allocated.
- if (!m_Imported && !m_UnmanagedMemory)
- {
- m_UnmanagedMemory = memory;
- m_Imported = true;
- return true;
- }
-
- // m_UnmanagedMemory initially allocated with Allocate().
- if (!m_Imported && m_UnmanagedMemory)
- {
- return false;
- }
-
- // m_UnmanagedMemory previously imported.
- if (m_Imported)
- {
- m_UnmanagedMemory = memory;
- return true;
- }
+ m_ImportedMemory = memory;
+ return true;
}
return false;
@@ -166,7 +127,7 @@ bool RefTensorHandle::Import(void* memory, MemorySource source)
bool RefTensorHandle::CanBeImported(void *memory, MemorySource source)
{
- if (m_IsImportEnabled && source == MemorySource::Malloc)
+ if (source == MemorySource::Malloc)
{
uintptr_t alignment = GetDataTypeSize(m_TensorInfo.GetDataType());
if (reinterpret_cast<uintptr_t>(memory) % alignment)
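The alignment check above can be read in isolation as the following sketch (IsAlignedFor is a hypothetical stand-in, not part of the patch): an import is only accepted when the buffer address is a multiple of the tensor's element size.

    #include <cstdint>

    // Mirrors the test in CanBeImported: e.g. a Float32 tensor (4-byte
    // elements) can only import a 4-byte-aligned buffer.
    bool IsAlignedFor(const void* memory, std::uintptr_t elementSize)
    {
        return reinterpret_cast<std::uintptr_t>(memory) % elementSize == 0;
    }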