about summary refs log tree commit diff
path: root/src/runtime/CL/CLTensor.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/CL/CLTensor.cpp')
-rw-r--r--  src/runtime/CL/CLTensor.cpp  16
1 file changed, 11 insertions, 5 deletions
diff --git a/src/runtime/CL/CLTensor.cpp b/src/runtime/CL/CLTensor.cpp
index 9bbf926b58..a6d0cf77ca 100644
--- a/src/runtime/CL/CLTensor.cpp
+++ b/src/runtime/CL/CLTensor.cpp
@@ -23,15 +23,21 @@
*/
#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLRuntimeContext.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
namespace arm_compute
{
-CLTensor::CLTensor()
- : _allocator(this)
+CLTensor::CLTensor(IRuntimeContext *ctx)
+ : _allocator(this, static_cast<CLRuntimeContext *>(ctx)), _ctx(static_cast<CLRuntimeContext *>(ctx))
{
}
+CLRuntimeContext *CLTensor::context()
+{
+ return _ctx;
+}
+
TensorInfo *CLTensor::info() const
{
return &_allocator.info();
@@ -59,12 +65,12 @@ CLTensorAllocator *CLTensor::allocator()
void CLTensor::map(bool blocking)
{
- ICLTensor::map(CLScheduler::get().queue(), blocking);
+ ICLTensor::map(_ctx == nullptr ? CLScheduler::get().queue() : _ctx->gpu_scheduler()->queue(), blocking);
}
void CLTensor::unmap()
{
- ICLTensor::unmap(CLScheduler::get().queue());
+ ICLTensor::unmap(_ctx == nullptr ? CLScheduler::get().queue() : _ctx->gpu_scheduler()->queue());
}
uint8_t *CLTensor::do_map(cl::CommandQueue &q, bool blocking)
@@ -81,4 +87,4 @@ void CLTensor::associate_memory_group(arm_compute::IMemoryGroup *memory_group)
{
_allocator.set_associated_memory_group(memory_group);
}
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute