aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2022-07-01 14:32:05 +0100
committerNikhil Raj <nikhil.raj@arm.com>2022-07-08 15:23:07 +0100
commita045ac07f8613edc4e0027b226eadd72e7a423ec (patch)
tree7eec878e05d762f86febea2195eee2a3a2f1a687 /src
parent93b650cb6c602aea3436725439d28ab0806142ca (diff)
downloadarmnn-a045ac07f8613edc4e0027b226eadd72e7a423ec.tar.gz
IVGCVSW-6957 'Import Host Memory in SL'
* Enabled import host memory in SL as default * Updated import host memory functionality in GpuAcc Signed-off-by: Sadik Armagan <sadik.armagan@arm.com> Change-Id: I22132b1e1008159b0e7247219762e3e9ae5eba10
Diffstat (limited to 'src')
-rw-r--r--src/armnn/LoadedNetwork.cpp37
-rw-r--r--src/backends/cl/ClImportTensorHandle.hpp54
2 files changed, 30 insertions, 61 deletions
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index a27add921e..8e664e699d 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1466,13 +1466,21 @@ std::vector<ImportedInputId> LoadedNetwork::ImportInputs(const InputTensors& inp
std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
inputTensor.second.GetMemoryArea());
- if (outputTensorHandle->CanBeImported(passThroughTensorHandle->Map(), forceImportMemorySource)
- && (outputTensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource)))
+ try
{
- importedInputs.push_back(inputIndex);
+ if (outputTensorHandle->CanBeImported(passThroughTensorHandle->Map(), forceImportMemorySource)
+ && (outputTensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource)))
+ {
+ importedInputs.push_back(inputIndex);
+ }
+ passThroughTensorHandle->Unmap();
+ }
+ catch(const MemoryImportException& exception)
+ {
+ ARMNN_LOG(error) << "An error occurred attempting to import input_"
+ << inputIndex << " : " << exception.what();
+ passThroughTensorHandle->Unmap();
}
- passThroughTensorHandle->Unmap();
-
inputIndex++;
}
@@ -1576,7 +1584,6 @@ std::vector<ImportedOutputId> LoadedNetwork::ImportOutputs(const OutputTensors&
for (const BindableLayer* const outputLayer : graph.GetOutputLayers())
{
auto inputTensorHandle = m_PreImportedOutputHandles[outputIndex].m_TensorHandle.get();
-
if (!inputTensorHandle)
{
outputIndex++;
@@ -1596,11 +1603,19 @@ std::vector<ImportedOutputId> LoadedNetwork::ImportOutputs(const OutputTensors&
}
const auto outputTensor = *it;
- // Check if the output memory can be imported
- if (inputTensorHandle->CanBeImported(outputTensor.second.GetMemoryArea(), forceImportMemorySource)
- && inputTensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
+ try
+ {
+ // Check if the output memory can be imported
+ if (inputTensorHandle->CanBeImported(outputTensor.second.GetMemoryArea(), forceImportMemorySource)
+ && inputTensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
+ {
+ importedOutputs.push_back(outputIndex);
+ }
+ }
+ catch(const MemoryImportException& exception)
{
- importedOutputs.push_back(outputIndex);
+ ARMNN_LOG(error) << "An error occurred attempting to import output_"
+ << outputIndex << " : " << exception.what();
}
outputIndex++;
}
diff --git a/src/backends/cl/ClImportTensorHandle.hpp b/src/backends/cl/ClImportTensorHandle.hpp
index 54710d8135..aba12d0977 100644
--- a/src/backends/cl/ClImportTensorHandle.hpp
+++ b/src/backends/cl/ClImportTensorHandle.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -101,7 +101,6 @@ public:
CL_IMPORT_TYPE_HOST_ARM,
0
};
-
return ClImport(importProperties, memory);
}
if (source == MemorySource::DmaBuf)
@@ -185,59 +184,14 @@ public:
}
}
- virtual bool CanBeImported(void* memory, MemorySource source) override
+ virtual bool CanBeImported(void* /*memory*/, MemorySource source) override
{
if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
{
if (source == MemorySource::Malloc)
{
- const cl_import_properties_arm importProperties[] =
- {
- CL_IMPORT_TYPE_ARM,
- CL_IMPORT_TYPE_HOST_ARM,
- 0
- };
-
- size_t totalBytes = m_Tensor.info()->total_size();
-
- // Round the size of the mapping to match the CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE
- // This does not change the size of the buffer, only the size of the mapping the buffer is mapped to
- // We do this to match the behaviour of the Import function later on.
- auto cachelineAlignment =
- arm_compute::CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
- auto roundedSize = totalBytes;
- if (totalBytes % cachelineAlignment != 0)
- {
- roundedSize = cachelineAlignment + totalBytes - (totalBytes % cachelineAlignment);
- }
-
- cl_int error = CL_SUCCESS;
- cl_mem buffer;
- buffer = clImportMemoryARM(arm_compute::CLKernelLibrary::get().context().get(),
- CL_MEM_READ_WRITE, importProperties, memory, roundedSize, &error);
-
- // If we fail to map we know the import will not succeed and can return false.
- // There is no memory to be released if error is not CL_SUCCESS
- if (error != CL_SUCCESS)
- {
- return false;
- }
- else
- {
- // If import was successful we can release the mapping knowing import will succeed at workload
- // execution and return true
- error = clReleaseMemObject(buffer);
- if (error == CL_SUCCESS)
- {
- return true;
- }
- else
- {
- // If we couldn't release the mapping this constitutes a memory leak and throw an exception
- throw MemoryImportException("ClImportTensorHandle::Failed to unmap cl_mem buffer: "
- + std::to_string(error));
- }
- }
+ // Returning true as ClImport() function will decide if memory can be imported or not
+ return true;
}
}
else