aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2018-11-14 13:16:56 +0000
committerIsabella Gottardi <isabella.gottardi@arm.com>2018-11-21 09:52:04 +0000
commitdf3103622b7de05f4e35b22a2c94b4a46eab4efc (patch)
tree17e10253e7a069c69d10bea0882b699b99d74b86
parentc47ef20d69e8ea0f519fdc679435cd7037fc18fe (diff)
downloadComputeLibrary-df3103622b7de05f4e35b22a2c94b4a46eab4efc.tar.gz
COMPMID-1088: Use IMemoryRegion in interfaces where possible
-Simplifies import memory interface -Changes the used of void** handles with appropriate interfaces. Change-Id: I5918c855c11f46352058864623336b352162a4b7
-rw-r--r--arm_compute/core/CL/OpenCL.h1
-rw-r--r--arm_compute/core/utils/misc/Cast.h47
-rw-r--r--arm_compute/runtime/BlobMemoryPool.h9
-rw-r--r--arm_compute/runtime/CL/CLMemory.h20
-rw-r--r--arm_compute/runtime/CL/CLMemoryRegion.h25
-rw-r--r--arm_compute/runtime/CL/CLTensorAllocator.h14
-rw-r--r--arm_compute/runtime/GLES_COMPUTE/GCMemory.h85
-rw-r--r--arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h108
-rw-r--r--arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h28
-rw-r--r--arm_compute/runtime/ILifetimeManager.h12
-rw-r--r--arm_compute/runtime/IMemory.h63
-rw-r--r--arm_compute/runtime/IMemoryRegion.h23
-rw-r--r--arm_compute/runtime/ISimpleLifetimeManager.h12
-rw-r--r--arm_compute/runtime/Memory.h24
-rw-r--r--arm_compute/runtime/MemoryGroupBase.h18
-rw-r--r--arm_compute/runtime/MemoryRegion.h34
-rw-r--r--arm_compute/runtime/OffsetMemoryPool.h11
-rw-r--r--arm_compute/runtime/TensorAllocator.h10
-rw-r--r--arm_compute/runtime/Types.h6
-rw-r--r--src/core/CL/OpenCL.cpp19
-rw-r--r--src/runtime/BlobMemoryPool.cpp14
-rw-r--r--src/runtime/CL/CLMemory.cpp34
-rw-r--r--src/runtime/CL/CLMemoryRegion.cpp11
-rw-r--r--src/runtime/CL/CLTensorAllocator.cpp73
-rw-r--r--src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp18
-rw-r--r--src/runtime/GLES_COMPUTE/GCMemory.cpp81
-rw-r--r--src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp96
-rw-r--r--src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp40
-rw-r--r--src/runtime/ISimpleLifetimeManager.cpp5
-rw-r--r--src/runtime/Memory.cpp18
-rw-r--r--src/runtime/OffsetMemoryPool.cpp16
-rw-r--r--src/runtime/TensorAllocator.cpp26
-rw-r--r--tests/validation/CL/UNIT/TensorAllocator.cpp19
-rw-r--r--tests/validation/NEON/UNIT/TensorAllocator.cpp35
34 files changed, 785 insertions, 270 deletions
diff --git a/arm_compute/core/CL/OpenCL.h b/arm_compute/core/CL/OpenCL.h
index be6ade65a5..65cbb3282b 100644
--- a/arm_compute/core/CL/OpenCL.h
+++ b/arm_compute/core/CL/OpenCL.h
@@ -117,6 +117,7 @@ public:
DECLARE_FUNCTION_PTR(clReleaseMemObject);
DECLARE_FUNCTION_PTR(clGetDeviceInfo);
DECLARE_FUNCTION_PTR(clGetDeviceIDs);
+ DECLARE_FUNCTION_PTR(clGetMemObjectInfo);
DECLARE_FUNCTION_PTR(clRetainEvent);
DECLARE_FUNCTION_PTR(clGetPlatformIDs);
DECLARE_FUNCTION_PTR(clGetKernelWorkGroupInfo);
diff --git a/arm_compute/core/utils/misc/Cast.h b/arm_compute/core/utils/misc/Cast.h
index f6c91dd2de..5d9d1b0eca 100644
--- a/arm_compute/core/utils/misc/Cast.h
+++ b/arm_compute/core/utils/misc/Cast.h
@@ -41,7 +41,7 @@ namespace cast
*
* @param[in] v Value to cast
*
- * @return The casted type
+ * @return The casted value
*/
template <typename Target, typename Source>
inline Target polymorphic_cast(Source *v)
@@ -62,7 +62,7 @@ inline Target polymorphic_cast(Source *v)
*
* @param[in] v Value to cast
*
- * @return The casted type
+ * @return The casted value
*/
template <typename Target, typename Source>
inline Target polymorphic_downcast(Source *v)
@@ -70,6 +70,49 @@ inline Target polymorphic_downcast(Source *v)
ARM_COMPUTE_ERROR_ON(dynamic_cast<Target>(v) != static_cast<Target>(v));
return static_cast<Target>(v);
}
+
+/** Polymorphic cast between two unique pointer types
+ *
+ * @warning Will throw an exception if cast cannot take place
+ *
+ * @tparam Target Target to cast type
+ * @tparam Source Source from cast type
+ * @tparam Deleter Deleter function type
+ *
+ * @param[in] v Value to cast
+ *
+ * @return The casted value
+ */
+template <typename Target, typename Source, typename Deleter>
+std::unique_ptr<Target, Deleter> polymorphic_cast_unique_ptr(std::unique_ptr<Source, Deleter> &&v)
+{
+ if(dynamic_cast<Target *>(v.get()) == nullptr)
+ {
+ throw std::bad_cast();
+ }
+ auto r = static_cast<Target *>(v.release());
+ return std::unique_ptr<Target, Deleter>(r, std::move(v.get_deleter()));
+}
+
+/** Polymorphic down cast between two unique pointer types
+ *
+ * @warning Will assert if cast cannot take place
+ *
+ * @tparam Target Target to cast type
+ * @tparam Source Source from cast type
+ * @tparam Deleter Deleter function type
+ *
+ * @param[in] v Value to cast
+ *
+ * @return The casted value
+ */
+template <typename Target, typename Source, typename Deleter>
+std::unique_ptr<Target, Deleter> polymorphic_downcast_unique_ptr(std::unique_ptr<Source, Deleter> &&v)
+{
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<Target *>(v.get()) != static_cast<Target *>(v.get()));
+ auto r = static_cast<Target *>(v.release());
+ return std::unique_ptr<Target, Deleter>(r, std::move(v.get_deleter()));
+}
} // namespace cast
} // namespace utils
} // namespace arm_compute
diff --git a/arm_compute/runtime/BlobMemoryPool.h b/arm_compute/runtime/BlobMemoryPool.h
index 25bfd539f6..c9c4da0f54 100644
--- a/arm_compute/runtime/BlobMemoryPool.h
+++ b/arm_compute/runtime/BlobMemoryPool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,7 @@
#include "arm_compute/runtime/IMemoryPool.h"
+#include "arm_compute/runtime/IMemoryRegion.h"
#include "arm_compute/runtime/Types.h"
#include <cstddef>
@@ -75,9 +76,9 @@ private:
void free_blobs();
private:
- IAllocator *_allocator; /**< Allocator to use for internal allocation */
- std::vector<void *> _blobs; /**< Vector holding all the memory blobs */
- std::vector<size_t> _blob_sizes; /**< Sizes of each blob */
+ IAllocator *_allocator; /**< Allocator to use for internal allocation */
+ std::vector<std::unique_ptr<IMemoryRegion>> _blobs; /**< Vector holding all the memory blobs */
+ std::vector<size_t> _blob_sizes; /**< Sizes of each blob */
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_BLOBMEMORYPOOL_H__ */
diff --git a/arm_compute/runtime/CL/CLMemory.h b/arm_compute/runtime/CL/CLMemory.h
index edd9de8097..02d36614ae 100644
--- a/arm_compute/runtime/CL/CLMemory.h
+++ b/arm_compute/runtime/CL/CLMemory.h
@@ -24,6 +24,8 @@
#ifndef __ARM_COMPUTE_RUNTIME_CL_CLMEMORY_H__
#define __ARM_COMPUTE_RUNTIME_CL_CLMEMORY_H__
+#include "arm_compute/runtime/IMemory.h"
+
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/runtime/CL/CLMemoryRegion.h"
@@ -33,7 +35,7 @@
namespace arm_compute
{
/** OpenCL implementation of memory object */
-class CLMemory
+class CLMemory : public IMemory
{
public:
/** Default Constructor */
@@ -59,20 +61,22 @@ public:
CLMemory(CLMemory &&) noexcept = default;
/** Allow instances of this class to be move assigned */
CLMemory &operator=(CLMemory &&) noexcept = default;
- /** Region accessor
+ /** OpenCL Region accessor
*
* @return Memory region
*/
- ICLMemoryRegion *region();
- /** Region accessor
+ ICLMemoryRegion *cl_region();
+ /** OpenCL Region accessor
*
* @return Memory region
*/
- ICLMemoryRegion *region() const;
+ ICLMemoryRegion *cl_region() const;
-private:
- /** Creates empty region */
- void create_empty_region();
+ // Inherited methods overridden:
+ IMemoryRegion *region() final;
+ IMemoryRegion *region() const final;
+ void set_region(IMemoryRegion *region) final;
+ void set_owned_region(std::unique_ptr<IMemoryRegion> region) final;
private:
ICLMemoryRegion *_region;
diff --git a/arm_compute/runtime/CL/CLMemoryRegion.h b/arm_compute/runtime/CL/CLMemoryRegion.h
index 01dd54e391..dbfd8225ca 100644
--- a/arm_compute/runtime/CL/CLMemoryRegion.h
+++ b/arm_compute/runtime/CL/CLMemoryRegion.h
@@ -81,9 +81,9 @@ public:
virtual void unmap(cl::CommandQueue &q) = 0;
// Inherited methods overridden :
- void *buffer() override;
- void *buffer() const override;
- void **handle() override;
+ void *buffer() override;
+ void *buffer() const override;
+ std::unique_ptr<IMemoryRegion> extract_subregion(size_t offset, size_t size) override;
protected:
cl::Context _ctx;
@@ -102,11 +102,16 @@ public:
* @param[in] size Region size
*/
CLBufferMemoryRegion(cl::Context ctx, cl_mem_flags flags, size_t size);
+ /** Constructor
+ *
+ * @param[in] buffer Buffer to be used as a memory region
+ */
+ CLBufferMemoryRegion(const cl::Buffer &buffer);
// Inherited methods overridden :
- void *ptr() override;
- void *map(cl::CommandQueue &q, bool blocking) override;
- void unmap(cl::CommandQueue &q) override;
+ void *ptr() final;
+ void *map(cl::CommandQueue &q, bool blocking) final;
+ void unmap(cl::CommandQueue &q) final;
};
/** OpenCL SVM memory region interface */
@@ -153,8 +158,8 @@ public:
CLCoarseSVMMemoryRegion(cl::Context ctx, cl_mem_flags flags, size_t size, size_t alignment);
// Inherited methods overridden :
- void *map(cl::CommandQueue &q, bool blocking) override;
- void unmap(cl::CommandQueue &q) override;
+ void *map(cl::CommandQueue &q, bool blocking) final;
+ void unmap(cl::CommandQueue &q) final;
};
/** OpenCL fine-grain SVM memory region implementation */
@@ -171,8 +176,8 @@ public:
CLFineSVMMemoryRegion(cl::Context ctx, cl_mem_flags flags, size_t size, size_t alignment);
// Inherited methods overridden :
- void *map(cl::CommandQueue &q, bool blocking) override;
- void unmap(cl::CommandQueue &q) override;
+ void *map(cl::CommandQueue &q, bool blocking) final;
+ void unmap(cl::CommandQueue &q) final;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_RUNTIME_CL_CL_MEMORY_REGION_H__ */
diff --git a/arm_compute/runtime/CL/CLTensorAllocator.h b/arm_compute/runtime/CL/CLTensorAllocator.h
index a372195555..de5f482d05 100644
--- a/arm_compute/runtime/CL/CLTensorAllocator.h
+++ b/arm_compute/runtime/CL/CLTensorAllocator.h
@@ -103,17 +103,13 @@ public:
void free() override;
/** Import an existing memory as a tensor's backing memory
*
- * @warning If the tensor is flagged to be managed by a memory manager,
- * this call will lead to an error.
- * @warning Ownership of memory depends on the way the @ref CLMemory object was constructed
- * @note Calling free on a tensor with imported memory will just clear
- * the internal pointer value.
+ * @warning Ownership of memory is not transferred
*
- * @param[in] memory Memory to import
+ * @param[in] buffer Buffer to import
*
* @return error status
*/
- arm_compute::Status import_memory(CLMemory memory);
+ arm_compute::Status import_memory(cl::Buffer buffer);
/** Associates the tensor with a memory group
*
* @param[in] associated_memory_group Memory group to associate the tensor with
@@ -130,8 +126,12 @@ protected:
void unlock() override;
private:
+ static const cl::Buffer _empty_buffer;
+
+private:
CLMemoryGroup *_associated_memory_group; /**< Registered memory manager */
CLMemory _memory; /**< OpenCL memory */
+ uint8_t *_mapping; /**< Pointer to the CPU mapping of the OpenCL buffer. */
CLTensor *_owner; /**< Owner of the allocator */
};
} // namespace arm_compute
diff --git a/arm_compute/runtime/GLES_COMPUTE/GCMemory.h b/arm_compute/runtime/GLES_COMPUTE/GCMemory.h
new file mode 100644
index 0000000000..bf0428a341
--- /dev/null
+++ b/arm_compute/runtime/GLES_COMPUTE/GCMemory.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_RUNTIME_GLES_COMPUTE_GCMEMORY_H__
+#define __ARM_COMPUTE_RUNTIME_GLES_COMPUTE_GCMEMORY_H__
+
+#include "arm_compute/runtime/IMemory.h"
+
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
+
+#include <cstddef>
+#include <memory>
+
+namespace arm_compute
+{
+/** GLES implementation of memory object */
+class GCMemory : public IMemory
+{
+public:
+ /** Default Constructor */
+ GCMemory();
+ /** Constructor
+ *
+ * @param[in] memory Memory to be imported
+ */
+ GCMemory(std::shared_ptr<IGCMemoryRegion> memory);
+ /** Constructor
+ *
+ * @note Ownership of the memory is not transferred to this object.
+ * Thus management (allocate/free) should be done by the client.
+ *
+ * @param[in] memory Memory to be imported
+ */
+ GCMemory(IGCMemoryRegion *memory);
+ /** Allow instances of this class to be copied */
+ GCMemory(const GCMemory &) = default;
+ /** Allow instances of this class to be copy assigned */
+ GCMemory &operator=(const GCMemory &) = default;
+ /** Allow instances of this class to be moved */
+ GCMemory(GCMemory &&) noexcept = default;
+ /** Allow instances of this class to be move assigned */
+ GCMemory &operator=(GCMemory &&) noexcept = default;
+ /** GLES Region accessor
+ *
+ * @return Memory region
+ */
+ IGCMemoryRegion *gc_region();
+ /** GLES Region accessor
+ *
+ * @return Memory region
+ */
+ IGCMemoryRegion *gc_region() const;
+
+ // Inherited methods overridden:
+ IMemoryRegion *region() final;
+ IMemoryRegion *region() const final;
+ void set_region(IMemoryRegion *region) final;
+ void set_owned_region(std::unique_ptr<IMemoryRegion> region) final;
+
+private:
+ IGCMemoryRegion *_region;
+ std::shared_ptr<IGCMemoryRegion> _region_owned;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_RUNTIME_GLES_COMPUTE_GCMEMORY_H__ */
diff --git a/arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h b/arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h
new file mode 100644
index 0000000000..23e3cebe3f
--- /dev/null
+++ b/arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_RUNTIME_GLES_COMPUTE_GC_MEMORY_REGION_H__
+#define __ARM_COMPUTE_RUNTIME_GLES_COMPUTE_GC_MEMORY_REGION_H__
+
+#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
+#include "arm_compute/runtime/IMemoryRegion.h"
+
+#include <cstddef>
+
+namespace arm_compute
+{
+/** GLES memory region interface */
+class IGCMemoryRegion : public IMemoryRegion
+{
+public:
+ /** Constructor
+ *
+ * @param[in] size Region size
+ */
+ IGCMemoryRegion(size_t size);
+ /** Default Destructor */
+ virtual ~IGCMemoryRegion() = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ IGCMemoryRegion(const IGCMemoryRegion &) = delete;
+ /** Default move constructor */
+ IGCMemoryRegion(IGCMemoryRegion &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ IGCMemoryRegion &operator=(const IGCMemoryRegion &) = delete;
+ /** Default move assignment operator */
+ IGCMemoryRegion &operator=(IGCMemoryRegion &&) = default;
+ /** Returns the underlying SSBO name of the GLES buffer
+ *
+ * @return GLES SSBO name
+ */
+ const GLuint &gc_ssbo_name() const;
+ /** Host/SVM pointer accessor
+ *
+ * @return Host/SVM pointer base
+ */
+ virtual void *ptr() = 0;
+ /** Maps the allocated buffer.
+ *
+ * @param[in] blocking If true, then the mapping will be ready to use by the time
+ * this method returns, else it is the caller's responsibility
+ * to wait for the mapping operation to have completed before using the returned mapping pointer.
+ *
+ * @return The mapping address.
+ */
+ virtual void *map(bool blocking) = 0;
+ /** Unmaps the allocated buffer.
+ *
+ * @note It is the caller's responsibility to make sure the unmap operation is finished before
+ * the memory is accessed by the device.
+ *
+ */
+ virtual void unmap() = 0;
+
+ // Inherited methods overridden :
+ void *buffer() override;
+ void *buffer() const override;
+
+protected:
+ void *_mapping;
+ GLuint _ssbo_name;
+};
+
+/** GLES buffer memory region implementation */
+class GCBufferMemoryRegion final : public IGCMemoryRegion
+{
+public:
+ /** Constructor
+ *
+ * @param[in] size Region size
+ */
+ GCBufferMemoryRegion(size_t size);
+ /** Destructor */
+ ~GCBufferMemoryRegion();
+
+ // Inherited methods overridden :
+ void *ptr() final;
+ void *map(bool blocking) final;
+ void unmap() final;
+ std::unique_ptr<IMemoryRegion> extract_subregion(size_t offset, size_t size) final;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_RUNTIME_GLES_COMPUTE_GC_MEMORY_REGION_H__ */
diff --git a/arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h b/arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h
index 1bd3582b6c..b7067664fc 100644
--- a/arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h
+++ b/arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h
@@ -26,6 +26,7 @@
#define __ARM_COMPUTE_GCTENSORALLOCATOR_H__
#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemory.h"
#include "arm_compute/runtime/ITensorAllocator.h"
#include "arm_compute/runtime/MemoryGroupBase.h"
@@ -38,20 +39,6 @@ template <typename>
class MemoryGroupBase;
using GCMemoryGroup = MemoryGroupBase<GCTensor>;
-class GLBufferWrapper
-{
-public:
- GLBufferWrapper()
- : _ssbo_name(0)
- {
- ARM_COMPUTE_GL_CHECK(glGenBuffers(1, &_ssbo_name));
- }
- ~GLBufferWrapper()
- {
- ARM_COMPUTE_GL_CHECK(glDeleteBuffers(1, &_ssbo_name));
- }
- GLuint _ssbo_name;
-};
/** Basic implementation of a GLES memory tensor allocator. */
class GCTensorAllocator : public ITensorAllocator
{
@@ -72,7 +59,7 @@ public:
GCTensorAllocator &operator=(GCTensorAllocator &&) = default;
/** Default destructor */
- ~GCTensorAllocator();
+ ~GCTensorAllocator() = default;
/** Interface to be implemented by the child class to return the pointer to the mapped data.
*
@@ -135,11 +122,10 @@ protected:
void unlock() override;
private:
- GCMemoryGroup *_associated_memory_group; /**< Registered memory group */
- std::unique_ptr<GLBufferWrapper> _gl_buffer; /**< OpenGL ES object containing the tensor data. */
- uint8_t *_mapping; /**< Pointer to the CPU mapping of the OpenGL ES buffer. */
- GCTensor *_owner; /**< Owner of the allocator */
+ GCMemoryGroup *_associated_memory_group; /**< Registered memory group */
+ GCMemory _memory; /**< OpenGL ES memory */
+ uint8_t *_mapping; /**< Pointer to the CPU mapping of the OpenGL ES buffer. */
+ GCTensor *_owner; /**< Owner of the allocator */
};
-}
-
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_GCTENSORALLOCATOR_H__ */
diff --git a/arm_compute/runtime/ILifetimeManager.h b/arm_compute/runtime/ILifetimeManager.h
index 36743ac404..f2e9b497c9 100644
--- a/arm_compute/runtime/ILifetimeManager.h
+++ b/arm_compute/runtime/ILifetimeManager.h
@@ -32,8 +32,10 @@
namespace arm_compute
{
-class IMemoryGroup;
+// Forward declarations
class IAllocator;
+class IMemory;
+class IMemoryGroup;
/** Interface for managing the lifetime of objects */
class ILifetimeManager
@@ -53,11 +55,11 @@ public:
virtual void start_lifetime(void *obj) = 0;
/** Ends lifetime of an object
*
- * @param[in] obj Object
- * @param[in] handle Memory handle of the object
- * @param[in] size Size of the given object at given time
+ * @param[in] obj Object
+ * @param[in] obj_memory Object memory
+ * @param[in] size Size of the given object at given time
*/
- virtual void end_lifetime(void *obj, void **handle, size_t size) = 0;
+ virtual void end_lifetime(void *obj, IMemory &obj_memory, size_t size) = 0;
/** Checks if the lifetime of the registered object is complete
*
* @return True if all object lifetimes are finalized else false.
diff --git a/arm_compute/runtime/IMemory.h b/arm_compute/runtime/IMemory.h
new file mode 100644
index 0000000000..e6f5058d94
--- /dev/null
+++ b/arm_compute/runtime/IMemory.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_IMEMORY_H__
+#define __ARM_COMPUTE_IMEMORY_H__
+
+#include "arm_compute/runtime/IMemoryRegion.h"
+
+namespace arm_compute
+{
+/** Memory interface */
+class IMemory
+{
+public:
+ /** Virtual default destructor */
+ virtual ~IMemory() = default;
+ /** Region accessor
+ *
+ * @return Memory region
+ */
+ virtual IMemoryRegion *region() = 0;
+ /** Region accessor
+ *
+ * @return Memory region
+ */
+ virtual IMemoryRegion *region() const = 0;
+ /** Sets a memory region
+ *
+ * @warning Ownership of the memory region remains to the caller
+ *
+ * @param region Memory region
+ */
+ virtual void set_region(IMemoryRegion *region) = 0;
+ /** Sets a memory region
+ *
+ * @warning Ownership of the memory region is transferred along
+ *
+ * @param region Memory region
+ */
+ virtual void set_owned_region(std::unique_ptr<IMemoryRegion> region) = 0;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_IMEMORY_H__ */
diff --git a/arm_compute/runtime/IMemoryRegion.h b/arm_compute/runtime/IMemoryRegion.h
index 4c08b4ab09..aa8c07ef2a 100644
--- a/arm_compute/runtime/IMemoryRegion.h
+++ b/arm_compute/runtime/IMemoryRegion.h
@@ -25,6 +25,7 @@
#define __ARM_COMPUTE_RUNTIME_IMEMORY_REGION_H__
#include <cstddef>
+#include <memory>
namespace arm_compute
{
@@ -36,12 +37,25 @@ public:
*
* @param[in] size Region size
*/
- IMemoryRegion(size_t size)
+ explicit IMemoryRegion(size_t size)
: _size(size)
{
}
/** Virtual Destructor */
virtual ~IMemoryRegion() = default;
+ /** Extract a sub-region from the memory
+ *
+ * @warning Ownership is maintained by the parent memory,
+ * while a wrapped raw memory region is returned by this function.
+ * Thus parent memory should not be released before this.
+ *
+ *
+ * @param[in] offset Offset to the region
+ * @param[in] size Size of the region
+ *
+ * @return A wrapped memory sub-region with no ownership of the underlying memory
+ */
+ virtual std::unique_ptr<IMemoryRegion> extract_subregion(size_t offset, size_t size) = 0;
/** Returns the pointer to the allocated data.
*
* @return Pointer to the allocated data
@@ -52,16 +66,11 @@ public:
* @return Pointer to the allocated data
*/
virtual void *buffer() const = 0;
- /** Handle of internal memory
- *
- * @return Handle of memory
- */
- virtual void **handle() = 0;
/** Memory region size accessor
*
* @return Memory region size
*/
- size_t size()
+ size_t size() const
{
return _size;
}
diff --git a/arm_compute/runtime/ISimpleLifetimeManager.h b/arm_compute/runtime/ISimpleLifetimeManager.h
index 7942e40f7f..f2eb4f5904 100644
--- a/arm_compute/runtime/ISimpleLifetimeManager.h
+++ b/arm_compute/runtime/ISimpleLifetimeManager.h
@@ -58,7 +58,7 @@ public:
// Inherited methods overridden:
void register_group(IMemoryGroup *group) override;
void start_lifetime(void *obj) override;
- void end_lifetime(void *obj, void **handle, size_t size) override;
+ void end_lifetime(void *obj, IMemory &obj_memory, size_t size) override;
bool are_all_finalized() const override;
protected:
@@ -69,14 +69,14 @@ protected:
/** Element struct */
struct Element
{
- Element(void *id_ = nullptr, void **handle_ = nullptr, size_t size_ = 0, bool status_ = false)
+ Element(void *id_ = nullptr, IMemory *handle_ = nullptr, size_t size_ = 0, bool status_ = false)
: id(id_), handle(handle_), size(size_), status(status_)
{
}
- void *id; /**< Element id */
- void **handle; /**< Element's memory handle */
- size_t size; /**< Element's size */
- bool status; /**< Lifetime status */
+ void *id; /**< Element id */
+ IMemory *handle; /**< Element's memory handle */
+ size_t size; /**< Element's size */
+ bool status; /**< Lifetime status */
};
/** Blob struct */
diff --git a/arm_compute/runtime/Memory.h b/arm_compute/runtime/Memory.h
index 2dadccf254..6f5254a689 100644
--- a/arm_compute/runtime/Memory.h
+++ b/arm_compute/runtime/Memory.h
@@ -24,6 +24,8 @@
#ifndef __ARM_COMPUTE_MEMORY_H__
#define __ARM_COMPUTE_MEMORY_H__
+#include "arm_compute/runtime/IMemory.h"
+
#include "arm_compute/runtime/IMemoryRegion.h"
#include <cstddef>
@@ -32,7 +34,7 @@
namespace arm_compute
{
/** CPU implementation of memory object */
-class Memory
+class Memory : public IMemory
{
public:
/** Default Constructor */
@@ -58,24 +60,16 @@ public:
Memory(Memory &&) noexcept = default;
/** Allow instances of this class to be move assigned */
Memory &operator=(Memory &&) noexcept = default;
- /** Region accessor
- *
- * @return Memory region
- */
- IMemoryRegion *region();
- /** Region accessor
- *
- * @return Memory region
- */
- IMemoryRegion *region() const;
-private:
- /** Creates empty region */
- void create_empty_region();
+ // Inherited methods overridden:
+ IMemoryRegion *region() final;
+ IMemoryRegion *region() const final;
+ void set_region(IMemoryRegion *region) final;
+ void set_owned_region(std::unique_ptr<IMemoryRegion> region) final;
private:
IMemoryRegion *_region;
std::shared_ptr<IMemoryRegion> _region_owned;
};
-}
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_MEMORY_H__ */
diff --git a/arm_compute/runtime/MemoryGroupBase.h b/arm_compute/runtime/MemoryGroupBase.h
index 06e4321410..0ceaa900c5 100644
--- a/arm_compute/runtime/MemoryGroupBase.h
+++ b/arm_compute/runtime/MemoryGroupBase.h
@@ -35,6 +35,9 @@
namespace arm_compute
{
+// Forward declarations
+class IMemory;
+
/** Memory group */
template <typename TensorType>
class MemoryGroupBase : public IMemoryGroup
@@ -63,11 +66,12 @@ public:
*
* @note Manager must not be finalized
*
- * @param[in] obj Object to request memory for
- * @param[in] handle Handle to store the memory
- * @param[in] size Size of memory to allocate
+ * @param[in, out] obj Object to request memory for
+ * @param[in, out] obj_memory Object's memory handling interface which can be used to alter the underlying memory
+ * that is used by the object.
+ * @param[in] size Size of memory to allocate
*/
- void finalize_memory(TensorType *obj, void **handle, size_t size);
+ void finalize_memory(TensorType *obj, IMemory &obj_memory, size_t size);
// Inherited methods overridden:
void acquire() override;
@@ -112,16 +116,16 @@ inline void MemoryGroupBase<TensorType>::manage(TensorType *obj)
}
template <typename TensorType>
-inline void MemoryGroupBase<TensorType>::finalize_memory(TensorType *obj, void **handle, size_t size)
+inline void MemoryGroupBase<TensorType>::finalize_memory(TensorType *obj, IMemory &obj_memory, size_t size)
{
// TODO (geopin01) : Check size (track size in MemoryMappings)
// Check if existing mapping is valid
- ARM_COMPUTE_ERROR_ON(!_mappings.empty() && (_mappings.find(handle) == std::end(_mappings)));
+ ARM_COMPUTE_ERROR_ON(!_mappings.empty() && (_mappings.find(&obj_memory) == std::end(_mappings)));
if(_memory_manager && _mappings.empty())
{
ARM_COMPUTE_ERROR_ON(!_memory_manager->lifetime_manager());
- _memory_manager->lifetime_manager()->end_lifetime(obj, handle, size);
+ _memory_manager->lifetime_manager()->end_lifetime(obj, obj_memory, size);
}
}
diff --git a/arm_compute/runtime/MemoryRegion.h b/arm_compute/runtime/MemoryRegion.h
index 481b20d375..335486ed9d 100644
--- a/arm_compute/runtime/MemoryRegion.h
+++ b/arm_compute/runtime/MemoryRegion.h
@@ -37,13 +37,13 @@ namespace arm_compute
class MemoryRegion final : public IMemoryRegion
{
public:
- /** Default constructor
+ /** Constructor
*
* @param[in] size Region size
* @param[in] alignment Alignment in bytes of the base pointer. Defaults to 0
*/
MemoryRegion(size_t size, size_t alignment = 0)
- : IMemoryRegion(size), _mem(nullptr), _alignment(alignment), _offset(0)
+ : IMemoryRegion(size), _mem(nullptr), _ptr(nullptr)
{
if(size != 0)
{
@@ -53,16 +53,25 @@ public:
{
delete[] ptr;
});
+ _ptr = _mem.get();
// Calculate alignment offset
if(alignment != 0)
{
void *aligned_ptr = _mem.get();
support::cpp11::align(alignment, size, aligned_ptr, space);
- _offset = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(_mem.get());
+ _ptr = aligned_ptr;
}
}
}
+ MemoryRegion(void *ptr, size_t size)
+ : IMemoryRegion(size), _mem(nullptr), _ptr(nullptr)
+ {
+ if(size != 0)
+ {
+ _ptr = ptr;
+ }
+ }
/** Prevent instances of this class from being copied (As this class contains pointers) */
MemoryRegion(const MemoryRegion &) = delete;
/** Default move constructor */
@@ -75,22 +84,27 @@ public:
// Inherited methods overridden :
void *buffer() final
{
- return reinterpret_cast<void *>(_mem.get() + _offset);
+ return _ptr;
}
void *buffer() const final
{
- // FIXME (COMPMID-1088) : Remove handle() and _offset when done
- return reinterpret_cast<void *>(_mem.get() + _offset);
+ return _ptr;
}
- void **handle() final
+ std::unique_ptr<IMemoryRegion> extract_subregion(size_t offset, size_t size) final
{
- return reinterpret_cast<void **>(&_mem);
+ if(_ptr != nullptr && (offset < _size) && (_size - offset >= size))
+ {
+ return support::cpp14::make_unique<MemoryRegion>(static_cast<uint8_t *>(_ptr) + offset, size);
+ }
+ else
+ {
+ return nullptr;
+ }
}
protected:
std::shared_ptr<uint8_t> _mem;
- size_t _alignment;
- size_t _offset;
+ void *_ptr;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_RUNTIME_MEMORY_REGION_H__ */
diff --git a/arm_compute/runtime/OffsetMemoryPool.h b/arm_compute/runtime/OffsetMemoryPool.h
index 9685fd1319..480d424b5b 100644
--- a/arm_compute/runtime/OffsetMemoryPool.h
+++ b/arm_compute/runtime/OffsetMemoryPool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,7 @@
#include "arm_compute/runtime/IMemoryPool.h"
+#include "arm_compute/runtime/IMemoryRegion.h"
#include "arm_compute/runtime/Types.h"
#include <cstddef>
@@ -47,7 +48,7 @@ public:
*/
OffsetMemoryPool(IAllocator *allocator, size_t blob_size);
/** Default Destructor */
- ~OffsetMemoryPool();
+ ~OffsetMemoryPool() = default;
/** Prevent instances of this class to be copy constructed */
OffsetMemoryPool(const OffsetMemoryPool &) = delete;
/** Prevent instances of this class to be copy assigned */
@@ -64,9 +65,9 @@ public:
std::unique_ptr<IMemoryPool> duplicate() override;
private:
- IAllocator *_allocator; /**< Allocator to use for internal allocation */
- void *_blob; /**< Memory blob */
- size_t _blob_size; /**< Sizes of the allocated memory blob */
+ IAllocator *_allocator; /**< Allocator to use for internal allocation */
+ std::unique_ptr<IMemoryRegion> _blob; /**< Memory blob */
+ size_t _blob_size; /**< Size of the allocated memory blob */
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_OFFSETMEMORYPOOL_H__ */
diff --git a/arm_compute/runtime/TensorAllocator.h b/arm_compute/runtime/TensorAllocator.h
index 2ad37d0576..ba9e5163ab 100644
--- a/arm_compute/runtime/TensorAllocator.h
+++ b/arm_compute/runtime/TensorAllocator.h
@@ -95,17 +95,11 @@ public:
void free() override;
/** Import an existing memory as a tensor's backing memory
*
- * @warning If the tensor is flagged to be managed by a memory manager,
- * this call will lead to an error.
- * @warning Ownership of memory depends on the way the @ref Memory object was constructed
- * @note Calling free on a tensor with imported memory will just clear
- * the internal pointer value.
- *
- * @param[in] memory Memory to import
+ * @warning Ownership of the memory is not transferred; the caller must keep it alive
*
* @return error status
*/
- arm_compute::Status import_memory(Memory memory);
+ arm_compute::Status import_memory(void *memory, size_t size);
/** Associates the tensor with a memory group
*
* @param[in] associated_memory_group Memory group to associate the tensor with
diff --git a/arm_compute/runtime/Types.h b/arm_compute/runtime/Types.h
index 9916e6d47b..b962427ef8 100644
--- a/arm_compute/runtime/Types.h
+++ b/arm_compute/runtime/Types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,8 @@
#ifndef __ARM_COMPUTE_RUNTIME_TYPES_H__
#define __ARM_COMPUTE_RUNTIME_TYPES_H__
+#include "arm_compute/runtime/IMemory.h"
+
#include <map>
namespace arm_compute
@@ -40,7 +42,7 @@ enum class MappingType
*
* @note All objects are pre-pinned to specific buffers to avoid any relevant overheads
*/
-using MemoryMappings = std::map<void **, size_t>;
+using MemoryMappings = std::map<IMemory *, size_t>;
/** A map of the groups and memory mappings */
using GroupMappings = std::map<size_t, MemoryMappings>;
diff --git a/src/core/CL/OpenCL.cpp b/src/core/CL/OpenCL.cpp
index 486bb6a1bd..6725f36a5d 100644
--- a/src/core/CL/OpenCL.cpp
+++ b/src/core/CL/OpenCL.cpp
@@ -106,6 +106,7 @@ bool CLSymbols::load(const std::string &library)
LOAD_FUNCTION_PTR(clReleaseMemObject, handle);
LOAD_FUNCTION_PTR(clGetDeviceInfo, handle);
LOAD_FUNCTION_PTR(clGetDeviceIDs, handle);
+ LOAD_FUNCTION_PTR(clGetMemObjectInfo, handle);
LOAD_FUNCTION_PTR(clRetainEvent, handle);
LOAD_FUNCTION_PTR(clGetPlatformIDs, handle);
LOAD_FUNCTION_PTR(clGetKernelWorkGroupInfo, handle);
@@ -796,6 +797,24 @@ cl_int clGetDeviceInfo(cl_device_id device,
}
}
+cl_int clGetMemObjectInfo(cl_mem memobj,
+ cl_mem_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
+{
+ arm_compute::CLSymbols::get().load_default();
+ auto func = arm_compute::CLSymbols::get().clGetMemObjectInfo_ptr;
+ if(func != nullptr)
+ {
+ return func(memobj, param_name, param_value_size, param_value, param_value_size_ret);
+ }
+ else
+ {
+ return CL_OUT_OF_RESOURCES;
+ }
+}
+
cl_int clRetainEvent(cl_event event)
{
arm_compute::CLSymbols::get().load_default();
diff --git a/src/runtime/BlobMemoryPool.cpp b/src/runtime/BlobMemoryPool.cpp
index 29505e57fc..e09451cd62 100644
--- a/src/runtime/BlobMemoryPool.cpp
+++ b/src/runtime/BlobMemoryPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,7 +52,7 @@ void BlobMemoryPool::acquire(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = _blobs[handle.second];
+ handle.first->set_region(_blobs[handle.second].get());
}
}
@@ -61,7 +61,7 @@ void BlobMemoryPool::release(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = nullptr;
+ handle.first->set_region(nullptr);
}
}
@@ -82,17 +82,11 @@ void BlobMemoryPool::allocate_blobs(const std::vector<size_t> &sizes)
for(const auto &size : sizes)
{
- _blobs.push_back(_allocator->allocate(size, 0));
+ _blobs.push_back(_allocator->make_region(size, 0));
}
}
void BlobMemoryPool::free_blobs()
{
- ARM_COMPUTE_ERROR_ON(!_allocator);
-
- for(auto &blob : _blobs)
- {
- _allocator->free(blob);
- }
_blobs.clear();
} \ No newline at end of file
diff --git a/src/runtime/CL/CLMemory.cpp b/src/runtime/CL/CLMemory.cpp
index bbc513d783..5bea85cfae 100644
--- a/src/runtime/CL/CLMemory.cpp
+++ b/src/runtime/CL/CLMemory.cpp
@@ -24,23 +24,20 @@
#include "arm_compute/runtime/CL/CLMemory.h"
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/utils/misc/Cast.h"
namespace arm_compute
{
CLMemory::CLMemory()
: _region(nullptr), _region_owned(nullptr)
{
- create_empty_region();
}
CLMemory::CLMemory(std::shared_ptr<ICLMemoryRegion> memory)
: _region(nullptr), _region_owned(std::move(memory))
{
- if(_region_owned == nullptr)
- {
- create_empty_region();
- }
- _region = _region_owned.get();
+ _region_owned = memory;
+ _region = _region_owned.get();
}
CLMemory::CLMemory(ICLMemoryRegion *memory)
@@ -49,19 +46,36 @@ CLMemory::CLMemory(ICLMemoryRegion *memory)
_region = memory;
}
-ICLMemoryRegion *CLMemory::region()
+ICLMemoryRegion *CLMemory::cl_region()
+{
+ return _region;
+}
+
+ICLMemoryRegion *CLMemory::cl_region() const
+{
+ return _region;
+}
+
+IMemoryRegion *CLMemory::region()
{
return _region;
}
-ICLMemoryRegion *CLMemory::region() const
+IMemoryRegion *CLMemory::region() const
{
return _region;
}
-void CLMemory::create_empty_region()
+void CLMemory::set_region(IMemoryRegion *region)
+{
+ auto cl_region = utils::cast::polymorphic_downcast<ICLMemoryRegion *>(region);
+ _region_owned = nullptr;
+ _region = cl_region;
+}
+
+void CLMemory::set_owned_region(std::unique_ptr<IMemoryRegion> region)
{
- _region_owned = std::make_shared<CLBufferMemoryRegion>(cl::Context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, 0);
+ _region_owned = utils::cast::polymorphic_downcast_unique_ptr<ICLMemoryRegion>(std::move(region));
_region = _region_owned.get();
}
} // namespace arm_compute \ No newline at end of file
diff --git a/src/runtime/CL/CLMemoryRegion.cpp b/src/runtime/CL/CLMemoryRegion.cpp
index 15fd7f333e..9578d73934 100644
--- a/src/runtime/CL/CLMemoryRegion.cpp
+++ b/src/runtime/CL/CLMemoryRegion.cpp
@@ -48,9 +48,10 @@ void *ICLMemoryRegion::buffer() const
return _mapping;
}
-void **ICLMemoryRegion::handle()
+std::unique_ptr<IMemoryRegion> ICLMemoryRegion::extract_subregion(size_t offset, size_t size)
{
- return reinterpret_cast<void **>(&_mem);
+ ARM_COMPUTE_UNUSED(offset, size);
+ return nullptr;
}
CLBufferMemoryRegion::CLBufferMemoryRegion(cl::Context ctx, cl_mem_flags flags, size_t size)
@@ -62,6 +63,12 @@ CLBufferMemoryRegion::CLBufferMemoryRegion(cl::Context ctx, cl_mem_flags flags,
}
}
+CLBufferMemoryRegion::CLBufferMemoryRegion(const cl::Buffer &buffer)
+ : ICLMemoryRegion(buffer.getInfo<CL_MEM_CONTEXT>(), buffer.getInfo<CL_MEM_SIZE>())
+{
+ _mem = buffer;
+}
+
void *CLBufferMemoryRegion::ptr()
{
return nullptr;
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index dd716f77ff..0307498335 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -28,86 +28,87 @@
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-using namespace arm_compute;
+namespace arm_compute
+{
+const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
namespace
{
-std::shared_ptr<arm_compute::ICLMemoryRegion> allocate_region(cl::Context context, size_t size, cl_uint alignment)
+std::unique_ptr<ICLMemoryRegion> allocate_region(cl::Context context, size_t size, cl_uint alignment)
{
// Try fine-grain SVM
- std::shared_ptr<ICLMemoryRegion> region = std::make_shared<CLFineSVMMemoryRegion>(context, CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, size, alignment);
+ std::unique_ptr<ICLMemoryRegion> region = support::cpp14::make_unique<CLFineSVMMemoryRegion>(context,
+ CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
+ size,
+ alignment);
// Try coarse-grain SVM in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = std::make_shared<CLCoarseSVMMemoryRegion>(context, CL_MEM_READ_WRITE, size, alignment);
+ region = support::cpp14::make_unique<CLCoarseSVMMemoryRegion>(context, CL_MEM_READ_WRITE, size, alignment);
}
// Try legacy buffer memory in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = std::make_shared<CLBufferMemoryRegion>(context, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
+ region = support::cpp14::make_unique<CLBufferMemoryRegion>(context, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
}
return region;
}
} // namespace
CLTensorAllocator::CLTensorAllocator(CLTensor *owner)
- : _associated_memory_group(nullptr), _memory(), _owner(owner)
+ : _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _owner(owner)
{
}
uint8_t *CLTensorAllocator::data()
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
+ return _mapping;
}
const cl::Buffer &CLTensorAllocator::cl_data() const
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- return _memory.region()->cl_data();
+ return _memory.region() == nullptr ? _empty_buffer : _memory.cl_region()->cl_data();
}
void CLTensorAllocator::allocate()
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
-
if(_associated_memory_group == nullptr)
{
- if(_memory.region()->cl_data().get() != nullptr)
+ if(_memory.region() != nullptr && _memory.cl_region()->cl_data().get() != nullptr)
{
// Memory is already allocated. Reuse it if big enough, otherwise fire an assertion
- ARM_COMPUTE_ERROR_ON_MSG(info().total_size() > _memory.region()->size(), "Reallocation of a bigger memory region is not allowed!");
+ ARM_COMPUTE_ERROR_ON_MSG(info().total_size() > _memory.region()->size(),
+ "Reallocation of a bigger memory region is not allowed!");
}
else
{
// Perform memory allocation
- _memory = CLMemory(allocate_region(CLScheduler::get().context(), info().total_size(), 0));
+ _memory.set_owned_region(allocate_region(CLScheduler::get().context(), info().total_size(), 0));
}
}
else
{
- _associated_memory_group->finalize_memory(_owner, _memory.region()->handle(), info().total_size());
- _memory.region()->set_size(info().total_size());
+ _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
}
info().set_is_resizable(false);
}
void CLTensorAllocator::free()
{
- if(_associated_memory_group == nullptr)
- {
- _memory = CLMemory();
- info().set_is_resizable(true);
- }
+ _mapping = nullptr;
+ _memory.set_region(nullptr);
+ info().set_is_resizable(true);
}
-arm_compute::Status CLTensorAllocator::import_memory(CLMemory memory)
+arm_compute::Status CLTensorAllocator::import_memory(cl::Buffer buffer)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_RETURN_ERROR_ON(memory.region()->cl_data().get() == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(buffer.get() == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_SIZE>() == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_CONTEXT>().get() != CLScheduler::get().context().get());
ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
- _memory = memory;
+
+ _memory.set_owned_region(support::cpp14::make_unique<CLBufferMemoryRegion>(buffer));
info().set_is_resizable(false);
return Status{};
@@ -115,11 +116,10 @@ arm_compute::Status CLTensorAllocator::import_memory(CLMemory memory)
void CLTensorAllocator::set_associated_memory_group(CLMemoryGroup *associated_memory_group)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
- ARM_COMPUTE_ERROR_ON(_memory.region()->cl_data().get() != nullptr);
- _memory = CLMemory(std::make_shared<CLBufferMemoryRegion>(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, 0));
+ ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.cl_region()->cl_data().get() != nullptr);
+
_associated_memory_group = associated_memory_group;
}
@@ -136,16 +136,23 @@ void CLTensorAllocator::unlock()
uint8_t *CLTensorAllocator::map(cl::CommandQueue &q, bool blocking)
{
+ ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
- _memory.region()->map(q, blocking);
- return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
+
+ _mapping = reinterpret_cast<uint8_t *>(_memory.cl_region()->map(q, blocking));
+ return _mapping;
}
void CLTensorAllocator::unmap(cl::CommandQueue &q, uint8_t *mapping)
{
- ARM_COMPUTE_UNUSED(mapping);
+ ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
+ ARM_COMPUTE_ERROR_ON(_mapping != mapping);
ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() == nullptr);
- _memory.region()->unmap(q);
+ ARM_COMPUTE_UNUSED(mapping);
+
+ _memory.cl_region()->unmap(q);
+ _mapping = nullptr;
}
+} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp b/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
index cdd12c3ad5..70a1f4f8ff 100644
--- a/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
+++ b/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
@@ -22,10 +22,10 @@
* SOFTWARE.
*/
#include "arm_compute/runtime/GLES_COMPUTE/GCBufferAllocator.h"
-#include "arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
#include <cstddef>
@@ -34,24 +34,26 @@ namespace arm_compute
void *GCBufferAllocator::allocate(size_t size, size_t alignment)
{
ARM_COMPUTE_UNUSED(alignment);
- auto *gl_buffer = new GLBufferWrapper();
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, gl_buffer->_ssbo_name));
+
+ auto *gl_ssbo_name = new GLuint;
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, *gl_ssbo_name));
ARM_COMPUTE_GL_CHECK(glBufferData(GL_SHADER_STORAGE_BUFFER, static_cast<GLsizeiptr>(size), nullptr, GL_STATIC_DRAW));
ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
- return reinterpret_cast<void *>(gl_buffer);
+ return reinterpret_cast<void *>(gl_ssbo_name);
}
void GCBufferAllocator::free(void *ptr)
{
ARM_COMPUTE_ERROR_ON(ptr == nullptr);
- auto *gl_buffer = reinterpret_cast<GLBufferWrapper *>(ptr);
- delete gl_buffer;
+ auto *gl_ssbo_name = reinterpret_cast<GLuint *>(ptr);
+ ARM_COMPUTE_GL_CHECK(glDeleteBuffers(1, gl_ssbo_name));
+ delete gl_ssbo_name;
}
std::unique_ptr<IMemoryRegion> GCBufferAllocator::make_region(size_t size, size_t alignment)
{
- ARM_COMPUTE_UNUSED(size, alignment);
- return nullptr;
+ ARM_COMPUTE_UNUSED(alignment);
+ return arm_compute::support::cpp14::make_unique<GCBufferMemoryRegion>(size);
}
} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCMemory.cpp b/src/runtime/GLES_COMPUTE/GCMemory.cpp
new file mode 100644
index 0000000000..fed4a158a3
--- /dev/null
+++ b/src/runtime/GLES_COMPUTE/GCMemory.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemory.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
+
+namespace arm_compute
+{
+GCMemory::GCMemory()
+ : _region(nullptr), _region_owned(nullptr)
+{
+}
+
+GCMemory::GCMemory(std::shared_ptr<IGCMemoryRegion> memory)
+ : _region(nullptr), _region_owned(std::move(memory))
+{
+ _region_owned = memory;
+ _region = _region_owned.get();
+}
+
+GCMemory::GCMemory(IGCMemoryRegion *memory)
+ : _region(memory), _region_owned(nullptr)
+{
+ _region = memory;
+}
+
+IGCMemoryRegion *GCMemory::gc_region()
+{
+ return _region;
+}
+
+IGCMemoryRegion *GCMemory::gc_region() const
+{
+ return _region;
+}
+
+IMemoryRegion *GCMemory::region()
+{
+ return _region;
+}
+
+IMemoryRegion *GCMemory::region() const
+{
+ return _region;
+}
+
+void GCMemory::set_region(IMemoryRegion *region)
+{
+ auto gc_region = utils::cast::polymorphic_downcast<IGCMemoryRegion *>(region);
+ _region_owned = nullptr;
+ _region = gc_region;
+}
+
+void GCMemory::set_owned_region(std::unique_ptr<IMemoryRegion> region)
+{
+ _region_owned = utils::cast::polymorphic_downcast_unique_ptr<IGCMemoryRegion>(std::move(region));
+ _region = _region_owned.get();
+}
+} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp b/src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp
new file mode 100644
index 0000000000..45fd6e8944
--- /dev/null
+++ b/src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
+
+#include "arm_compute/core/Error.h"
+
+namespace arm_compute
+{
+IGCMemoryRegion::IGCMemoryRegion(size_t size)
+ : IMemoryRegion(size), _mapping(nullptr), _ssbo_name(0)
+{
+}
+
+const GLuint &IGCMemoryRegion::gc_ssbo_name() const
+{
+ return _ssbo_name;
+}
+
+void *IGCMemoryRegion::buffer()
+{
+ return _mapping;
+}
+
+void *IGCMemoryRegion::buffer() const
+{
+ return _mapping;
+}
+
+GCBufferMemoryRegion::GCBufferMemoryRegion(size_t size)
+ : IGCMemoryRegion(size)
+{
+ ARM_COMPUTE_GL_CHECK(glGenBuffers(1, &_ssbo_name));
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _ssbo_name));
+ ARM_COMPUTE_GL_CHECK(glBufferData(GL_SHADER_STORAGE_BUFFER, static_cast<GLsizeiptr>(size), nullptr, GL_STATIC_DRAW));
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+}
+
+GCBufferMemoryRegion::~GCBufferMemoryRegion()
+{
+ ARM_COMPUTE_GL_CHECK(glDeleteBuffers(1, &_ssbo_name));
+}
+
+void *GCBufferMemoryRegion::ptr()
+{
+ return nullptr;
+}
+
+void *GCBufferMemoryRegion::map(bool blocking)
+{
+ ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
+ ARM_COMPUTE_UNUSED(blocking);
+
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _ssbo_name));
+ void *p = ARM_COMPUTE_GL_CHECK(glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, static_cast<GLsizeiptr>(size()), GL_MAP_READ_BIT | GL_MAP_WRITE_BIT));
+ _mapping = reinterpret_cast<uint8_t *>(p);
+
+ return _mapping;
+}
+
+void GCBufferMemoryRegion::unmap()
+{
+ ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
+
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _ssbo_name));
+ ARM_COMPUTE_GL_CHECK(glUnmapBuffer(GL_SHADER_STORAGE_BUFFER));
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+ _mapping = nullptr;
+}
+
+std::unique_ptr<IMemoryRegion> GCBufferMemoryRegion::extract_subregion(size_t offset, size_t size)
+{
+ ARM_COMPUTE_UNUSED(offset, size);
+ return nullptr;
+}
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
index abd2b483d3..a0dd540a7c 100644
--- a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
+++ b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
@@ -26,21 +26,17 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
#include "support/ToolchainSupport.h"
using namespace arm_compute;
GCTensorAllocator::GCTensorAllocator(GCTensor *owner)
- : _associated_memory_group(nullptr), _gl_buffer(), _mapping(nullptr), _owner(owner)
+ : _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _owner(owner)
{
}
-GCTensorAllocator::~GCTensorAllocator()
-{
- _gl_buffer = support::cpp14::make_unique<GLBufferWrapper>();
-}
-
uint8_t *GCTensorAllocator::data()
{
return _mapping;
@@ -50,32 +46,28 @@ void GCTensorAllocator::allocate()
{
if(_associated_memory_group == nullptr)
{
- _gl_buffer = support::cpp14::make_unique<GLBufferWrapper>();
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _gl_buffer->_ssbo_name));
- ARM_COMPUTE_GL_CHECK(glBufferData(GL_SHADER_STORAGE_BUFFER, static_cast<GLsizeiptr>(info().total_size()), nullptr, GL_STATIC_DRAW));
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+ _memory.set_owned_region(support::cpp14::make_unique<GCBufferMemoryRegion>(info().total_size()));
}
else
{
- _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(&_gl_buffer), info().total_size());
+ _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
}
info().set_is_resizable(false);
}
void GCTensorAllocator::free()
{
- if(_associated_memory_group == nullptr)
- {
- _gl_buffer.reset();
- info().set_is_resizable(true);
- }
+ _mapping = nullptr;
+ _memory.set_region(nullptr);
+ info().set_is_resizable(true);
}
void GCTensorAllocator::set_associated_memory_group(GCMemoryGroup *associated_memory_group)
{
ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
- ARM_COMPUTE_ERROR_ON(_gl_buffer.get() != nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.gc_region()->gc_ssbo_name() != 0);
+
_associated_memory_group = associated_memory_group;
}
@@ -91,27 +83,23 @@ void GCTensorAllocator::unlock()
GLuint GCTensorAllocator::get_gl_ssbo_name() const
{
- return _gl_buffer->_ssbo_name;
+ return (_memory.region() == nullptr) ? static_cast<GLuint>(0) : _memory.gc_region()->gc_ssbo_name();
}
uint8_t *GCTensorAllocator::map(bool blocking)
{
ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
- ARM_COMPUTE_UNUSED(blocking);
-
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _gl_buffer->_ssbo_name));
- void *p = ARM_COMPUTE_GL_CHECK(glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, static_cast<GLsizeiptr>(info().total_size()), GL_MAP_READ_BIT | GL_MAP_WRITE_BIT));
- _mapping = reinterpret_cast<uint8_t *>(p);
+ ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
+ _mapping = reinterpret_cast<uint8_t *>(_memory.gc_region()->map(blocking));
return _mapping;
}
void GCTensorAllocator::unmap()
{
ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _gl_buffer->_ssbo_name));
- ARM_COMPUTE_GL_CHECK(glUnmapBuffer(GL_SHADER_STORAGE_BUFFER));
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+ _memory.gc_region()->unmap();
_mapping = nullptr;
} \ No newline at end of file
diff --git a/src/runtime/ISimpleLifetimeManager.cpp b/src/runtime/ISimpleLifetimeManager.cpp
index faaff8a63e..7d928d6a7a 100644
--- a/src/runtime/ISimpleLifetimeManager.cpp
+++ b/src/runtime/ISimpleLifetimeManager.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/IAllocator.h"
+#include "arm_compute/runtime/IMemory.h"
#include "arm_compute/runtime/IMemoryGroup.h"
#include "arm_compute/runtime/IMemoryPool.h"
#include "support/ToolchainSupport.h"
@@ -70,7 +71,7 @@ void ISimpleLifetimeManager::start_lifetime(void *obj)
_active_elements.insert(std::make_pair(obj, obj));
}
-void ISimpleLifetimeManager::end_lifetime(void *obj, void **handle, size_t size)
+void ISimpleLifetimeManager::end_lifetime(void *obj, IMemory &obj_memory, size_t size)
{
ARM_COMPUTE_ERROR_ON(obj == nullptr);
@@ -80,7 +81,7 @@ void ISimpleLifetimeManager::end_lifetime(void *obj, void **handle, size_t size)
// Update object fields and mark object as complete
Element &el = active_object_it->second;
- el.handle = handle;
+ el.handle = &obj_memory;
el.size = size;
el.status = true;
diff --git a/src/runtime/Memory.cpp b/src/runtime/Memory.cpp
index 15bbb17675..d116624679 100644
--- a/src/runtime/Memory.cpp
+++ b/src/runtime/Memory.cpp
@@ -30,17 +30,13 @@ namespace arm_compute
Memory::Memory()
: _region(nullptr), _region_owned(nullptr)
{
- create_empty_region();
}
Memory::Memory(std::shared_ptr<IMemoryRegion> memory)
: _region(nullptr), _region_owned(std::move(memory))
{
- if(_region_owned == nullptr)
- {
- create_empty_region();
- }
- _region = _region_owned.get();
+ _region_owned = memory;
+ _region = _region_owned.get();
}
Memory::Memory(IMemoryRegion *memory)
@@ -59,9 +55,15 @@ IMemoryRegion *Memory::region() const
return _region;
}
-void Memory::create_empty_region()
+void Memory::set_region(IMemoryRegion *region)
+{
+ _region_owned = nullptr;
+ _region = region;
+}
+
+void Memory::set_owned_region(std::unique_ptr<IMemoryRegion> region)
{
- _region_owned = std::make_shared<MemoryRegion>(0);
+ _region_owned = std::move(region);
_region = _region_owned.get();
}
} // namespace arm_compute
diff --git a/src/runtime/OffsetMemoryPool.cpp b/src/runtime/OffsetMemoryPool.cpp
index 96f54f890f..36eaf0ba1a 100644
--- a/src/runtime/OffsetMemoryPool.cpp
+++ b/src/runtime/OffsetMemoryPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,6 +28,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/IAllocator.h"
#include "arm_compute/runtime/IMemoryPool.h"
+#include "arm_compute/runtime/MemoryRegion.h"
#include "arm_compute/runtime/Types.h"
#include "support/ToolchainSupport.h"
@@ -37,14 +38,7 @@ OffsetMemoryPool::OffsetMemoryPool(IAllocator *allocator, size_t blob_size)
: _allocator(allocator), _blob(), _blob_size(blob_size)
{
ARM_COMPUTE_ERROR_ON(!allocator);
- _blob = _allocator->allocate(_blob_size, 0);
-}
-
-OffsetMemoryPool::~OffsetMemoryPool()
-{
- ARM_COMPUTE_ERROR_ON(!_allocator);
- _allocator->free(_blob);
- _blob = nullptr;
+ _blob = _allocator->make_region(blob_size, 0);
}
void OffsetMemoryPool::acquire(MemoryMappings &handles)
@@ -55,7 +49,7 @@ void OffsetMemoryPool::acquire(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = reinterpret_cast<uint8_t *>(_blob) + handle.second;
+ handle.first->set_owned_region(_blob->extract_subregion(handle.second, _blob_size - handle.second));
}
}
@@ -64,7 +58,7 @@ void OffsetMemoryPool::release(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = nullptr;
+ handle.first->set_region(nullptr);
}
}
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index c84a2719d8..5fa51d7140 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -127,39 +127,35 @@ void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &
uint8_t *TensorAllocator::data() const
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
+ return (_memory.region() == nullptr) ? nullptr : reinterpret_cast<uint8_t *>(_memory.region()->buffer());
}
void TensorAllocator::allocate()
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
-
if(_associated_memory_group == nullptr)
{
- _memory = Memory(std::make_shared<MemoryRegion>(info().total_size(), alignment()));
+ _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(info().total_size(), alignment()));
}
else
{
- _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(_memory.region()->handle()), info().total_size());
- _memory.region()->set_size(info().total_size());
+ _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
}
info().set_is_resizable(false);
}
void TensorAllocator::free()
{
- _memory = Memory();
+ _memory.set_region(nullptr);
info().set_is_resizable(true);
}
-arm_compute::Status TensorAllocator::import_memory(Memory memory)
+arm_compute::Status TensorAllocator::import_memory(void *memory, size_t size)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_RETURN_ERROR_ON(memory.region()->buffer() == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(memory == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(size == 0);
ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
- _memory = memory;
+
+ _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(memory, info().total_size()));
info().set_is_resizable(false);
return Status{};
@@ -167,10 +163,10 @@ arm_compute::Status TensorAllocator::import_memory(Memory memory)
void TensorAllocator::set_associated_memory_group(MemoryGroup *associated_memory_group)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
- ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.region()->buffer() != nullptr);
+
_associated_memory_group = associated_memory_group;
}
diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp
index a34a37eb7b..849eee84d0 100644
--- a/tests/validation/CL/UNIT/TensorAllocator.cpp
+++ b/tests/validation/CL/UNIT/TensorAllocator.cpp
@@ -45,31 +45,32 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
// Init tensor info
TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
- // Allocate memory
- auto buf = std::make_shared<CLBufferMemoryRegion>(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, info.total_size());
+ // Allocate memory buffer
+ const size_t total_size = info.total_size();
+ auto buf = cl::Buffer(CLScheduler::get().context(), CL_MEM_READ_WRITE, total_size);
- // Negative case : Import empty memory
+ // Negative case : Import nullptr
CLTensor t1;
t1.allocator()->init(info);
- ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(CLMemory())), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(cl::Buffer())), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
// Negative case : Import memory to a tensor that is memory managed
CLTensor t2;
CLMemoryGroup mg;
t2.allocator()->set_associated_memory_group(&mg);
- ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(CLMemory(buf))), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
- // Positive case : Set managed pointer
+ // Positive case : Set raw pointer
CLTensor t3;
t3.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(CLMemory(buf))), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.cl_buffer().get() == buf->cl_data().get(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(t3.cl_buffer().get() == buf.get(), framework::LogLevel::ERRORS);
t3.allocator()->free();
ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.buffer() == nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(t3.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS);
}
TEST_SUITE_END()
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index 7781107210..384a00855b 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -49,37 +49,34 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
// Allocate memory buffer
- auto buf = std::make_shared<MemoryRegion>(info.total_size());
+ const size_t total_size = info.total_size();
+ auto data = support::cpp14::make_unique<uint8_t[]>(total_size);
- // Negative case : Import empty memory
+ // Negative case : Import pointer with zero size
Tensor t1;
t1.allocator()->init(info);
- ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(Memory())), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(data.get(), 0)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
- // Negative case : Import memory to a tensor that is memory managed
- Tensor t2;
- MemoryGroup mg;
- t2.allocator()->set_associated_memory_group(&mg);
- ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
+ // Negative case : Import nullptr
+ Tensor t2;
+ t2.allocator()->init(info);
+ ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(nullptr, total_size)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
- // Positive case : Set raw pointer
- Tensor t3;
- t3.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.buffer() == reinterpret_cast<uint8_t *>(buf->buffer()), framework::LogLevel::ERRORS);
- t3.allocator()->free();
+ // Negative case : Import memory to a tensor that is memory managed
+ Tensor t3;
+ MemoryGroup mg;
+ t3.allocator()->set_associated_memory_group(&mg);
+ ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get(), total_size)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.buffer() == nullptr, framework::LogLevel::ERRORS);
- // Positive case : Set managed pointer
+ // Positive case : Set raw pointer
Tensor t4;
t4.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(Memory(buf))), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get(), total_size)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(buf->buffer()), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()), framework::LogLevel::ERRORS);
t4.allocator()->free();
ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t4.buffer() == nullptr, framework::LogLevel::ERRORS);