From 26014cf4d0519aef280c8444c60ec34c4e37e3b6 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Mon, 9 Sep 2019 19:00:57 +0100
Subject: COMPMID-2649: Generalize MemoryGroup.

Avoids any upcasting.

Change-Id: I2181c7c9df59a7fb8a78e11934fbd96058fd39c7
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/1918
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gian Marco Iodice
---
 src/runtime/CL/CLTensor.cpp                        | 10 ++++++++--
 src/runtime/CL/CLTensorAllocator.cpp               |  9 ++++-----
 .../CL/functions/CLDeconvolutionLayerUpsample.cpp  |  1 +
 src/runtime/CL/functions/CLSoftmaxLayer.cpp        |  1 -
 src/runtime/GLES_COMPUTE/GCTensor.cpp              | 14 ++++++++++----
 src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp     |  8 ++++----
 src/runtime/Tensor.cpp                             | 12 +++++++++---
 src/runtime/TensorAllocator.cpp                    | 18 +++++++++---------
 8 files changed, 45 insertions(+), 28 deletions(-)

(limited to 'src/runtime')

diff --git a/src/runtime/CL/CLTensor.cpp b/src/runtime/CL/CLTensor.cpp
index 732689e7ec..9bbf926b58 100644
--- a/src/runtime/CL/CLTensor.cpp
+++ b/src/runtime/CL/CLTensor.cpp
@@ -25,8 +25,8 @@
 
 #include "arm_compute/runtime/CL/CLScheduler.h"
 
-using namespace arm_compute;
-
+namespace arm_compute
+{
 CLTensor::CLTensor()
     : _allocator(this)
 {
@@ -76,3 +76,9 @@ void CLTensor::do_unmap(cl::CommandQueue &q)
 {
     _allocator.unmap(q, buffer());
 }
+
+void CLTensor::associate_memory_group(arm_compute::IMemoryGroup *memory_group)
+{
+    _allocator.set_associated_memory_group(memory_group);
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index 72b5854c5c..60e1ca8df6 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -25,7 +25,6 @@
 
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 
 namespace arm_compute
@@ -104,8 +103,8 @@ void populate_quantization_info(CLFloatArray &scale, CLInt32Array &offset, const
 }
 } // namespace
 
-CLTensorAllocator::CLTensorAllocator(CLTensor *owner)
-    : _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _owner(owner), _scale(), _offset()
+CLTensorAllocator::CLTensorAllocator(IMemoryManageable *owner)
+    : _owner(owner), _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _scale(), _offset()
 {
 }
 
@@ -143,7 +142,7 @@ void CLTensorAllocator::allocate()
     }
     else
     {
-        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
+        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment());
     }
 
     // Allocate and fill the quantization parameter arrays
@@ -178,7 +177,7 @@ Status CLTensorAllocator::import_memory(cl::Buffer buffer)
     return Status{};
 }
 
-void CLTensorAllocator::set_associated_memory_group(CLMemoryGroup *associated_memory_group)
+void CLTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
 {
     ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
     ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp b/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
index 63a45aae8e..eaf7c66083 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
@@ -26,6 +26,7 @@
 #include "arm_compute/core/CL/OpenCL.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
 
 namespace arm_compute
 {
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index 7e41dba8ab..73add97ef1 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -30,7 +30,6 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 
 namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCTensor.cpp b/src/runtime/GLES_COMPUTE/GCTensor.cpp
index e193d26f0a..66c1abdb6d 100644
--- a/src/runtime/GLES_COMPUTE/GCTensor.cpp
+++ b/src/runtime/GLES_COMPUTE/GCTensor.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,8 +24,8 @@
 
 #include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
 
-using namespace arm_compute;
-
+namespace arm_compute
+{
 GCTensor::GCTensor()
     : _allocator(this)
 {
@@ -56,6 +56,11 @@ GLuint GCTensor::gc_buffer() const
     return _allocator.get_gl_ssbo_name();
 }
 
+void GCTensor::associate_memory_group(arm_compute::IMemoryGroup *memory_group)
+{
+    _allocator.set_associated_memory_group(memory_group);
+}
+
 void GCTensor::map(bool blocking)
 {
     IGCTensor::map(blocking);
@@ -74,4 +79,5 @@ uint8_t *GCTensor::do_map(bool blocking)
 void GCTensor::do_unmap()
 {
     _allocator.unmap();
-}
\ No newline at end of file
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
index 9a5d139517..cccc6a75c6 100644
--- a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
+++ b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
@@ -32,8 +32,8 @@
 
 using namespace arm_compute;
 
-GCTensorAllocator::GCTensorAllocator(GCTensor *owner)
-    : _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _owner(owner)
+GCTensorAllocator::GCTensorAllocator(IMemoryManageable *owner)
+    : _owner(owner), _associated_memory_group(nullptr), _memory(), _mapping(nullptr)
 {
 }
 
@@ -50,7 +50,7 @@ void GCTensorAllocator::allocate()
     }
     else
     {
-        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
+        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment());
     }
     info().set_is_resizable(false);
 }
@@ -62,7 +62,7 @@ void GCTensorAllocator::free()
     info().set_is_resizable(true);
 }
 
-void GCTensorAllocator::set_associated_memory_group(GCMemoryGroup *associated_memory_group)
+void GCTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
 {
     ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
     ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
diff --git a/src/runtime/Tensor.cpp b/src/runtime/Tensor.cpp
index a76c37e3d0..de08efd731 100644
--- a/src/runtime/Tensor.cpp
+++ b/src/runtime/Tensor.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,8 +23,8 @@
  */
 #include "arm_compute/runtime/Tensor.h"
 
-using namespace arm_compute;
-
+namespace arm_compute
+{
 Tensor::Tensor()
     : _allocator(this)
 {
@@ -49,3 +49,9 @@ TensorAllocator *Tensor::allocator()
 {
     return &_allocator;
 }
+
+void Tensor::associate_memory_group(IMemoryGroup *memory_group)
+{
+    _allocator.set_associated_memory_group(memory_group);
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index dfe239c586..7352932ac7 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -66,8 +66,8 @@ bool validate_subtensor_shape(const TensorInfo &parent_info, const TensorInfo &c
 }
 } // namespace
 
-TensorAllocator::TensorAllocator(Tensor *owner)
-    : _associated_memory_group(nullptr), _memory(), _owner(owner)
+TensorAllocator::TensorAllocator(IMemoryManageable *owner)
+    : _owner(owner), _associated_memory_group(nullptr), _memory()
 {
 }
 
@@ -78,28 +78,28 @@ TensorAllocator::~TensorAllocator()
 
 TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept
     : ITensorAllocator(std::move(o)),
+      _owner(o._owner),
       _associated_memory_group(o._associated_memory_group),
-      _memory(std::move(o._memory)),
-      _owner(o._owner)
+      _memory(std::move(o._memory))
 {
+    o._owner                   = nullptr;
     o._associated_memory_group = nullptr;
     o._memory                  = Memory();
-    o._owner                   = nullptr;
 }
 
 TensorAllocator &TensorAllocator::operator=(TensorAllocator &&o) noexcept
 {
     if(&o != this)
     {
+        _owner   = o._owner;
+        o._owner = nullptr;
+
         _associated_memory_group   = o._associated_memory_group;
         o._associated_memory_group = nullptr;
 
         _memory   = std::move(o._memory);
         o._memory = Memory();
 
-        _owner   = o._owner;
-        o._owner = nullptr;
-
         ITensorAllocator::operator=(std::move(o));
     }
     return *this;
@@ -161,7 +161,7 @@ Status TensorAllocator::import_memory(void *memory)
     return Status{};
 }
 
-void TensorAllocator::set_associated_memory_group(MemoryGroup *associated_memory_group)
+void TensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
 {
     ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
     ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
--
cgit v1.2.1
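
Usage note (not part of the patch): the sketch below illustrates the call-site pattern this change enables. A single backend-agnostic MemoryGroup now manages tensors through the IMemoryManageable interface, and the allocators register the group via set_associated_memory_group(IMemoryGroup *), so no CLMemoryGroup/GCMemoryGroup and no upcasting is needed. The memory-manager wiring below (BlobLifetimeManager, PoolManager, MemoryManagerOnDemand, Allocator) follows the library's usual pattern but is an illustrative assumption, not code taken from this commit.

// Minimal sketch: managing a CPU Tensor through the generalized MemoryGroup.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Allocator.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

using namespace arm_compute;

int main()
{
    // Backend-agnostic memory manager: lifetime tracking plus pooling.
    auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
    auto pool_mgr     = std::make_shared<PoolManager>();
    auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);

    // One MemoryGroup type for every managed tensor; no backend-specific group.
    MemoryGroup group(mm);

    Tensor tmp;
    tmp.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));

    // manage() sees the tensor as IMemoryManageable; the tensor registers the
    // group through associate_memory_group(IMemoryGroup *), added in this patch.
    group.manage(&tmp);

    // allocate() defers to IMemoryGroup::finalize_memory(owner, memory, size, alignment).
    tmp.allocator()->allocate();

    // Back the pool with real memory, then bring the group's memory in and out of scope.
    Allocator backing_allocator;
    mm->populate(backing_allocator, 1 /* num_pools */);

    group.acquire();
    // ... tmp.buffer() is valid here ...
    group.release();

    return 0;
}

The same group could manage a CLTensor or GCTensor in exactly the same way, since CLTensorAllocator and GCTensorAllocator now accept any IMemoryGroup and their finalize_memory() calls carry both size and alignment, as the diff above shows.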