Diffstat (limited to 'arm_compute/runtime/CL/CLTensor.h')
 arm_compute/runtime/CL/CLTensor.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arm_compute/runtime/CL/CLTensor.h b/arm_compute/runtime/CL/CLTensor.h
index 102cb3636a..0729935e9e 100644
--- a/arm_compute/runtime/CL/CLTensor.h
+++ b/arm_compute/runtime/CL/CLTensor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 ARM Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,7 @@ public:
* @param[in] ctx (Optional) Pointer to a @ref CLRuntimeContext.
* If nullptr is passed in, the legacy api using the singletons will be used. Otherwise the memory for the
* tensor will allocate on the context passed in.
- * The singletons legacy api has been deprecated and will be removed.
+ * The singletons legacy api has been deprecated and will be removed in future releases.
*/
CLTensor(IRuntimeContext *ctx = nullptr);
@@ -87,17 +87,17 @@ public:
TensorInfo *info() override;
const cl::Buffer &cl_buffer() const override;
CLQuantization quantization() const override;
- void associate_memory_group(IMemoryGroup *memory_group) override;
+ void associate_memory_group(IMemoryGroup *memory_group) override;
CLRuntimeContext *context();
protected:
// Inherited methods overridden:
uint8_t *do_map(cl::CommandQueue &q, bool blocking) override;
- void do_unmap(cl::CommandQueue &q) override;
+ void do_unmap(cl::CommandQueue &q) override;
private:
mutable CLTensorAllocator _allocator; /**< Instance of the OpenCL tensor allocator */
- CLRuntimeContext *_ctx{ nullptr };
+ CLRuntimeContext *_ctx{nullptr};
};
/** OpenCL Image */
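
For reference, below is a minimal usage sketch of the two construction paths touched by this diff: the deprecated singleton-based path (no context passed) and the path where memory is allocated on an explicitly supplied CLRuntimeContext. This example is not part of the commit; the tensor shape, data type, and main() wrapper are arbitrary, and it assumes the standard arm_compute CL headers and a working OpenCL runtime.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLRuntimeContext.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    // Arbitrary tensor descriptor used for both tensors below.
    const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);

    // Legacy path: no context is passed to the constructor, so the tensor
    // falls back to the singleton-based API that this header marks as
    // deprecated. The global CLScheduler must be initialised first.
    CLScheduler::get().default_init();
    CLTensor legacy_tensor;
    legacy_tensor.allocator()->init(info);
    legacy_tensor.allocator()->allocate();

    // Context path: the tensor's memory is allocated on the context that is
    // passed in, as described in the constructor documentation above.
    CLRuntimeContext ctx;
    CLTensor ctx_tensor(&ctx);
    ctx_tensor.allocator()->init(info);
    ctx_tensor.allocator()->allocate();

    // The public map()/unmap() calls wrap the protected do_map()/do_unmap()
    // overrides shown in the diff, giving host-side access to the cl::Buffer.
    ctx_tensor.map(true /* blocking */);
    ctx_tensor.unmap();

    return 0;
}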