about summary refs log tree commit diff
path: root/src/backends/reference
diff options
context:
space:
mode:
authorFinn Williams <finn.williams@arm.com>2021-10-28 19:07:32 +0100
committerFinn Williams <finn.williams@arm.com>2021-11-08 14:33:17 +0000
commitb1aad4270fa8ad5c4aa62e27d564baf723b2cee5 (patch)
tree98b19ba85b50e2c730d5d2e3822cd2b1438bd149 /src/backends/reference
parent3f22d27f51c493e37b9da0692b6bf776f4430dcf (diff)
downloadarmnn-b1aad4270fa8ad5c4aa62e27d564baf723b2cee5.tar.gz
IVGCVSW-6527 Support the new memory API in loaded network
* enable external memory management for neon and ref backends
* change m_TensorMemoryVector to hold shared pointers
* change input layer backend Id to match backend id of connected layer

Signed-off-by: Finn Williams <finn.williams@arm.com>
Change-Id: I2216a724028312eb101b290df3f224177826b1a0
Diffstat (limited to 'src/backends/reference')
-rw-r--r--src/backends/reference/RefBackend.hpp2
-rw-r--r--src/backends/reference/RefTensorHandle.cpp2
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp22
3 files changed, 19 insertions, 7 deletions
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 6114ce6218..da04f22d93 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -16,7 +16,7 @@ const BackendCapabilities cpuRefCapabilities("CpuRef",
{"ProtectedContentAllocation", false},
{"ConstantTensorsAsInputs", true},
{"PreImportIOTensors", true},
- {"ExternallyManagedMemory", false},
+ {"ExternallyManagedMemory", true},
{"MultiAxisPacking", false},
{"SingleAxisPacking", true}
});
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index b9e566eace..5229e9d62b 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -122,7 +122,7 @@ bool RefTensorHandle::Import(void* memory, MemorySource source)
if (m_IsImportEnabled && source == MemorySource::Malloc)
{
// Check memory alignment
- constexpr uintptr_t alignment = sizeof(size_t);
+ uintptr_t alignment = GetDataTypeSize(m_TensorInfo.GetDataType());
if (reinterpret_cast<uintptr_t>(memory) % alignment)
{
if (m_Imported)
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 75008bc866..36dcd21d32 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -113,10 +113,14 @@ bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
const bool isMemoryManaged) const
{
- // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
- // to unmanaged memory. This also ensures memory alignment.
- IgnoreUnused(isMemoryManaged);
- return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
+ if (isMemoryManaged)
+ {
+ return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
+ }
+ else
+ {
+ return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
+ }
}
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
@@ -126,7 +130,15 @@ std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const Tens
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
IgnoreUnused(isMemoryManaged, dataLayout);
- return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
+
+ if (isMemoryManaged)
+ {
+ return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
+ }
+ else
+ {
+ return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
+ }
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,