author    Georgios Pinitas <georgios.pinitas@arm.com>  2017-11-07 13:24:57 +0000
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    84b51ad1aaa530d397761f2b6da65add9dc8a6b0 (patch)
tree      4f0150ef15c6bbff287658c517e3257513e4dc3e /src
parent    c5d54397f6da490442f93ae880b361c45969f6b1 (diff)
download  ComputeLibrary-84b51ad1aaa530d397761f2b6da65add9dc8a6b0.tar.gz
COMPMID-666: Adds the ability to import backing memory in a Tensor(CPU).
Change-Id: I62b843b544fe9048837fd64c22e970fc6a0aaf23
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94881
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/runtime/Memory.cpp           62
-rw-r--r--  src/runtime/TensorAllocator.cpp  53
2 files changed, 91 insertions(+), 24 deletions(-)
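The import path added by this change can be exercised roughly as follows. This is a minimal sketch, not part of the patch: the shape, data type, and buffer are illustrative, and it assumes the existing Tensor/TensorAllocator runtime API plus the Memory class introduced in src/runtime/Memory.cpp. An imported raw buffer is not owned by the tensor, so the caller must keep it alive for the tensor's lifetime.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Memory.h"
#include "arm_compute/runtime/Tensor.h"

#include <vector>

using namespace arm_compute;

int main()
{
    // Caller-owned backing storage (hypothetical buffer, sized for a 16x16 F32 tensor).
    std::vector<uint8_t> external_buffer(16 * 16 * sizeof(float));

    // Describe the tensor as usual.
    Tensor tensor;
    tensor.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // Import the external buffer instead of calling allocate();
    // Memory(uint8_t*) is non-owning, so external_buffer must outlive the tensor.
    Error err = tensor.allocator()->import_memory(Memory(external_buffer.data()));
    ARM_COMPUTE_UNUSED(err); // a real caller would inspect the returned Error

    return 0;
}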
diff --git a/src/runtime/Memory.cpp b/src/runtime/Memory.cpp
new file mode 100644
index 0000000000..35d0c824bb
--- /dev/null
+++ b/src/runtime/Memory.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/Memory.h"
+
+#include "arm_compute/core/Error.h"
+
+using namespace arm_compute;
+
+Memory::Memory()
+ : _memory(nullptr), _memory_owned(nullptr)
+{
+}
+
+Memory::Memory(std::shared_ptr<uint8_t> memory)
+ : _memory(nullptr), _memory_owned(std::move(memory))
+{
+ ARM_COMPUTE_ERROR_ON(_memory_owned.get() == nullptr);
+ _memory = _memory_owned.get();
+}
+
+Memory::Memory(uint8_t *memory)
+ : _memory(memory), _memory_owned(nullptr)
+{
+ ARM_COMPUTE_ERROR_ON(memory == nullptr);
+}
+
+uint8_t *Memory::buffer()
+{
+ return _memory;
+}
+
+uint8_t *Memory::buffer() const
+{
+ return _memory;
+}
+
+uint8_t **Memory::handle()
+{
+ ARM_COMPUTE_ERROR_ON(_memory_owned.get() != nullptr);
+ return &_memory;
+}
\ No newline at end of file
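For reference, a Memory object built as above either shares ownership of its backing store (shared_ptr constructor, paired with an explicit array deleter as in TensorAllocator::allocate() below) or merely aliases a caller-owned pointer (raw-pointer constructor). A small illustration, not part of the patch; the buffer names are hypothetical:

#include "arm_compute/runtime/Memory.h"

#include <memory>

using namespace arm_compute;

void memory_ownership_sketch(uint8_t *caller_buffer) // caller_buffer must be non-null
{
    // Owning mode: the Memory instance keeps the shared_ptr (and thus the
    // allocation) alive; note the array deleter matching the array new.
    auto owned = std::shared_ptr<uint8_t>(new uint8_t[64](), [](uint8_t *ptr) { delete[] ptr; });
    Memory owning(owned);

    // Non-owning mode: only the raw pointer is recorded; the caller stays
    // responsible for the buffer's lifetime.
    Memory aliasing(caller_buffer);

    // handle() is reserved for memory the object does not own
    // (calling it on owning memory trips the ARM_COMPUTE_ERROR_ON above).
    uint8_t **slot = aliasing.handle();
    static_cast<void>(slot);
}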
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index 272b9f5695..25bd479c84 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/MemoryGroup.h"
+#include "support/ToolchainSupport.h"
#include <cstddef>
@@ -65,28 +66,23 @@ bool validate_subtensor_shape(const TensorInfo &parent_info, const TensorInfo &c
} // namespace
TensorAllocator::TensorAllocator(Tensor *owner)
- : _associated_memory_group(nullptr), _buffer(nullptr), _owner(owner)
+ : _associated_memory_group(nullptr), _memory(), _owner(owner)
{
}
TensorAllocator::~TensorAllocator()
{
- if((_associated_memory_group == nullptr) && (_buffer != nullptr))
- {
- delete[] _buffer;
- _buffer = nullptr;
- info().set_is_resizable(true);
- }
+ info().set_is_resizable(true);
}
TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept
: ITensorAllocator(std::move(o)),
_associated_memory_group(o._associated_memory_group),
- _buffer(o._buffer),
+ _memory(std::move(o._memory)),
_owner(o._owner)
{
o._associated_memory_group = nullptr;
- o._buffer = nullptr;
+ o._memory = Memory();
o._owner = nullptr;
}
@@ -97,8 +93,8 @@ TensorAllocator &TensorAllocator::operator=(TensorAllocator &&o) noexcept
_associated_memory_group = o._associated_memory_group;
o._associated_memory_group = nullptr;
- _buffer = o._buffer;
- o._buffer = nullptr;
+ _memory = std::move(o._memory);
+ o._memory = Memory();
_owner = o._owner;
o._owner = nullptr;
@@ -118,7 +114,7 @@ void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &
ARM_COMPUTE_UNUSED(validate_subtensor_shape);
// Copy pointer to buffer
- _buffer = allocator._buffer;
+ _memory = Memory(allocator._memory.buffer());
// Init tensor info with new dimensions
size_t total_size = parent_info.offset_element_in_bytes(coords) + sub_info.total_size() - sub_info.offset_first_element_in_bytes();
@@ -130,44 +126,53 @@ void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &
uint8_t *TensorAllocator::data() const
{
- return _buffer;
+ return _memory.buffer();
}
void TensorAllocator::allocate()
{
- ARM_COMPUTE_ERROR_ON(_buffer != nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.buffer() != nullptr);
if(_associated_memory_group == nullptr)
{
- _buffer = new uint8_t[info().total_size()]();
+ _memory = Memory(std::shared_ptr<uint8_t>(new uint8_t[info().total_size()](), [](uint8_t *ptr)
+ {
+ delete[] ptr;
+ }));
}
else
{
- _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(&_buffer), info().total_size());
+ _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(_memory.handle()), info().total_size());
}
info().set_is_resizable(false);
}
void TensorAllocator::free()
{
- if((_associated_memory_group == nullptr) && (_buffer != nullptr))
- {
- delete[] _buffer;
- _buffer = nullptr;
- info().set_is_resizable(true);
- }
+ _memory = Memory();
+ info().set_is_resizable(true);
+}
+
+arm_compute::Error TensorAllocator::import_memory(Memory memory)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON(memory.buffer() == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
+ _memory = memory;
+ info().set_is_resizable(false);
+
+ return Error{};
}
void TensorAllocator::set_associated_memory_group(MemoryGroup *associated_memory_group)
{
ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
- ARM_COMPUTE_ERROR_ON(_buffer != nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.buffer() != nullptr);
_associated_memory_group = associated_memory_group;
}
uint8_t *TensorAllocator::lock()
{
- return _buffer;
+ return _memory.buffer();
}
void TensorAllocator::unlock()