ArmNN 24.02
ClTensorHandle.hpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <Half.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>

#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/runtime/CL/CLSubTensor.h>
#include <arm_compute/runtime/IMemoryGroup.h>
#include <arm_compute/runtime/MemoryGroup.h>
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/core/Coordinates.h>

#include <cl/IClTensorHandle.hpp>

namespace armnn
{
class ClTensorHandleDecorator;

class ClTensorHandle : public IClTensorHandle
{
public:
    ClTensorHandle(const TensorInfo& tensorInfo)
        : m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined)),
          m_Imported(false),
          m_IsImportEnabled(false)
    {
        armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
    }

    ClTensorHandle(const TensorInfo& tensorInfo,
                   DataLayout dataLayout,
                   MemorySourceFlags importFlags = static_cast<MemorySourceFlags>(MemorySource::Undefined))
        : m_ImportFlags(importFlags),
          m_Imported(false),
          m_IsImportEnabled(false)
    {
        armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
    }

    arm_compute::CLTensor& GetTensor() override { return m_Tensor; }
    arm_compute::CLTensor const& GetTensor() const override { return m_Tensor; }
    virtual void Allocate() override
    {
        // If we have enabled Importing, don't allocate the tensor
        if (m_IsImportEnabled)
        {
            throw MemoryImportException("ClTensorHandle::Attempting to allocate memory when importing");
        }
        else
        {
            armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
        }
    }

    virtual void Manage() override
    {
        // If we have enabled Importing, don't manage the tensor
        if (m_IsImportEnabled)
        {
            throw MemoryImportException("ClTensorHandle::Attempting to manage memory when importing");
        }
        else
        {
            assert(m_MemoryGroup != nullptr);
            m_MemoryGroup->manage(&m_Tensor);
        }
    }

    virtual const void* Map(bool blocking = true) const override
    {
        const_cast<arm_compute::CLTensor*>(&m_Tensor)->map(blocking);
        return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
    }

    virtual void Unmap() const override { const_cast<arm_compute::CLTensor*>(&m_Tensor)->unmap(); }

    virtual ITensorHandle* GetParent() const override { return nullptr; }

    virtual arm_compute::DataType GetDataType() const override
    {
        return m_Tensor.info()->data_type();
    }

    virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override
    {
        m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
    }

    TensorShape GetStrides() const override
    {
        return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
    }

    TensorShape GetShape() const override
    {
        return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
    }

    void SetImportFlags(MemorySourceFlags importFlags)
    {
        m_ImportFlags = importFlags;
    }

    MemorySourceFlags GetImportFlags() const override
    {
        return m_ImportFlags;
    }

    void SetImportEnabledFlag(bool importEnabledFlag)
    {
        m_IsImportEnabled = importEnabledFlag;
    }

    virtual bool Import(void* memory, MemorySource source) override
    {
        armnn::IgnoreUnused(memory);
        if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
        {
            throw MemoryImportException("ClTensorHandle::Incorrect import flag");
        }
        m_Imported = false;
        return false;
    }

    virtual bool CanBeImported(void* memory, MemorySource source) override
    {
        // This TensorHandle can never import.
        armnn::IgnoreUnused(memory, source);
        return false;
    }

    virtual std::shared_ptr<ITensorHandle> DecorateTensorHandle(const TensorInfo& tensorInfo) override;

private:
    // Only used for testing
    void CopyOutTo(void* memory) const override
    {
        const_cast<armnn::ClTensorHandle*>(this)->Map(true);
        switch(this->GetDataType())
        {
            case arm_compute::DataType::F32:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<float*>(memory));
                break;
            case arm_compute::DataType::U8:
            case arm_compute::DataType::QASYMM8:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<uint8_t*>(memory));
                break;
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            case arm_compute::DataType::QASYMM8_SIGNED:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int8_t*>(memory));
                break;
            case arm_compute::DataType::F16:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<armnn::Half*>(memory));
                break;
            case arm_compute::DataType::S16:
            case arm_compute::DataType::QSYMM16:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int16_t*>(memory));
                break;
            case arm_compute::DataType::S32:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int32_t*>(memory));
                break;
            default:
            {
                throw armnn::UnimplementedException();
            }
        }
        const_cast<armnn::ClTensorHandle*>(this)->Unmap();
    }

    // Only used for testing
    void CopyInFrom(const void* memory) override
    {
        this->Map(true);
        switch(this->GetDataType())
        {
            case arm_compute::DataType::F32:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::U8:
            case arm_compute::DataType::QASYMM8:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::F16:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::S16:
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            case arm_compute::DataType::QASYMM8_SIGNED:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::QSYMM16:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::S32:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
                                                                 this->GetTensor());
                break;
            default:
            {
                throw armnn::UnimplementedException();
            }
        }
        this->Unmap();
    }

    arm_compute::CLTensor m_Tensor;
    std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
    MemorySourceFlags m_ImportFlags;
    bool m_Imported;
    bool m_IsImportEnabled;
    std::vector<std::shared_ptr<ClTensorHandleDecorator>> m_Decorated;
};
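
// Illustrative usage sketch (assumes Float32 data and that importing is not enabled):
// a ClTensorHandle is built from a TensorInfo, registered with ACL's memory manager via
// Manage(), and given device memory by Allocate(); Map()/Unmap() then give temporary
// host access to the underlying CLTensor buffer.
//
//     armnn::TensorInfo info({ 1, 3, 224, 224 }, armnn::DataType::Float32);
//     armnn::ClTensorHandle handle(info);
//     handle.Allocate();                       // throws MemoryImportException if import is enabled
//     const void* hostPtr = handle.Map(true);  // blocking map of the CL buffer
//     handle.Unmap();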

class ClSubTensorHandle : public IClTensorHandle
{
public:
    ClSubTensorHandle(IClTensorHandle* parent,
                      const arm_compute::TensorShape& shape,
                      const arm_compute::Coordinates& coords)
        : m_Tensor(&parent->GetTensor(), shape, coords)
    {
        parentHandle = parent;
    }

    arm_compute::CLSubTensor& GetTensor() override { return m_Tensor; }
    arm_compute::CLSubTensor const& GetTensor() const override { return m_Tensor; }

    virtual void Allocate() override {}
    virtual void Manage() override {}

    virtual const void* Map(bool blocking = true) const override
    {
        const_cast<arm_compute::CLSubTensor*>(&m_Tensor)->map(blocking);
        return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
    }
    virtual void Unmap() const override { const_cast<arm_compute::CLSubTensor*>(&m_Tensor)->unmap(); }

    virtual ITensorHandle* GetParent() const override { return parentHandle; }

    virtual arm_compute::DataType GetDataType() const override
    {
        return m_Tensor.info()->data_type();
    }

    virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>&) override {}

    TensorShape GetStrides() const override
    {
        return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
    }

    TensorShape GetShape() const override
    {
        return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
    }

private:
    // Only used for testing
    void CopyOutTo(void* memory) const override
    {
        const_cast<ClSubTensorHandle*>(this)->Map(true);
        switch(this->GetDataType())
        {
            case arm_compute::DataType::F32:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<float*>(memory));
                break;
            case arm_compute::DataType::U8:
            case arm_compute::DataType::QASYMM8:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<uint8_t*>(memory));
                break;
            case arm_compute::DataType::F16:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<armnn::Half*>(memory));
                break;
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            case arm_compute::DataType::QASYMM8_SIGNED:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int8_t*>(memory));
                break;
            case arm_compute::DataType::S16:
            case arm_compute::DataType::QSYMM16:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int16_t*>(memory));
                break;
            case arm_compute::DataType::S32:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int32_t*>(memory));
                break;
            default:
            {
                throw armnn::UnimplementedException();
            }
        }
        const_cast<ClSubTensorHandle*>(this)->Unmap();
    }

    // Only used for testing
    void CopyInFrom(const void* memory) override
    {
        this->Map(true);
        switch(this->GetDataType())
        {
            case arm_compute::DataType::F32:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::U8:
            case arm_compute::DataType::QASYMM8:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::F16:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            case arm_compute::DataType::QASYMM8_SIGNED:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::S16:
            case arm_compute::DataType::QSYMM16:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::S32:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
                                                                 this->GetTensor());
                break;
            default:
            {
                throw armnn::UnimplementedException();
            }
        }
        this->Unmap();
    }

    mutable arm_compute::CLSubTensor m_Tensor;
    ITensorHandle* parentHandle = nullptr;
};
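
// Illustrative sketch: a ClSubTensorHandle is a non-owning window into the parent
// handle's CLTensor, described by an ACL shape and the coordinates of the window's
// origin; Allocate()/Manage() are no-ops because the parent owns the memory.
// Assuming parentHandle is an IClTensorHandle* whose tensor is already allocated:
//
//     arm_compute::TensorShape subShape(8, 8);    // extent of the window
//     arm_compute::Coordinates coords(0, 0);      // origin of the window in the parent
//     armnn::ClSubTensorHandle subTensor(parentHandle, subShape, coords);
//     const void* hostPtr = subTensor.Map(true);  // maps the parent's buffer
//     subTensor.Unmap();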

/** ClTensorDecorator wraps an existing CL tensor allowing us to override the TensorInfo for it */
class ClTensorDecorator : public arm_compute::ICLTensor
{
public:
    ClTensorDecorator();

    ClTensorDecorator(arm_compute::ICLTensor* original, const TensorInfo& info);

    ~ClTensorDecorator() = default;

    ClTensorDecorator(const ClTensorDecorator&) = delete;

    ClTensorDecorator& operator=(const ClTensorDecorator&) = delete;

    ClTensorDecorator(ClTensorDecorator&&) = default;

    ClTensorDecorator& operator=(ClTensorDecorator&&) = default;

    arm_compute::ICLTensor* parent();

    void map(bool blocking = true);
    using arm_compute::ICLTensor::map;

    void unmap();
    using arm_compute::ICLTensor::unmap;

    virtual arm_compute::ITensorInfo* info() const override;
    virtual arm_compute::ITensorInfo* info() override;
    const cl::Buffer& cl_buffer() const override;
    arm_compute::CLQuantization quantization() const override;

protected:
    // Inherited methods overridden:
    uint8_t* do_map(cl::CommandQueue& q, bool blocking) override;
    void do_unmap(cl::CommandQueue& q) override;

private:
    arm_compute::ICLTensor* m_Original;
    mutable arm_compute::TensorInfo m_TensorInfo;
};
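
// Illustrative sketch: ClTensorDecorator forwards map()/unmap() and cl_buffer() to the
// original ICLTensor while reporting the overriding TensorInfo through info(), so the
// same GPU buffer can be viewed under a different description without a copy. It is
// normally reached via DecorateTensorHandle() below; assuming clHandle is an existing
// armnn::ClTensorHandle whose tensor has been allocated and overrideInfo is a
// compatible TensorInfo:
//
//     armnn::ClTensorDecorator decorated(&clHandle.GetTensor(), overrideInfo);
//     decorated.map(true);    // maps the original tensor's buffer
//     decorated.unmap();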

class ClTensorHandleDecorator : public IClTensorHandle
{
public:
    ClTensorHandleDecorator(IClTensorHandle* parent, const TensorInfo& info)
        : m_Tensor(&parent->GetTensor(), info)
    {
        m_OriginalHandle = parent;
    }

    arm_compute::ICLTensor& GetTensor() override { return m_Tensor; }
    arm_compute::ICLTensor const& GetTensor() const override { return m_Tensor; }

    virtual void Allocate() override {}
    virtual void Manage() override {}

    virtual const void* Map(bool blocking = true) const override
    {
        m_Tensor.map(blocking);
        return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
    }

    virtual void Unmap() const override
    {
        m_Tensor.unmap();
    }

    virtual ITensorHandle* GetParent() const override { return nullptr; }

    virtual arm_compute::DataType GetDataType() const override
    {
        return m_Tensor.info()->data_type();
    }

    virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>&) override {}

    TensorShape GetStrides() const override
    {
        return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
    }

    TensorShape GetShape() const override
    {
        return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
    }

private:
    // Only used for testing
    void CopyOutTo(void* memory) const override
    {
        const_cast<ClTensorHandleDecorator*>(this)->Map(true);
        switch(this->GetDataType())
        {
            case arm_compute::DataType::F32:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<float*>(memory));
                break;
            case arm_compute::DataType::U8:
            case arm_compute::DataType::QASYMM8:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<uint8_t*>(memory));
                break;
            case arm_compute::DataType::F16:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<armnn::Half*>(memory));
                break;
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            case arm_compute::DataType::QASYMM8_SIGNED:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int8_t*>(memory));
                break;
            case arm_compute::DataType::S16:
            case arm_compute::DataType::QSYMM16:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int16_t*>(memory));
                break;
            case arm_compute::DataType::S32:
                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                 static_cast<int32_t*>(memory));
                break;
            default:
            {
                throw armnn::UnimplementedException();
            }
        }
        const_cast<ClTensorHandleDecorator*>(this)->Unmap();
    }

    // Only used for testing
    void CopyInFrom(const void* memory) override
    {
        this->Map(true);
        switch(this->GetDataType())
        {
            case arm_compute::DataType::F32:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::U8:
            case arm_compute::DataType::QASYMM8:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::F16:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            case arm_compute::DataType::QASYMM8_SIGNED:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::S16:
            case arm_compute::DataType::QSYMM16:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
                                                                 this->GetTensor());
                break;
            case arm_compute::DataType::S32:
                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
                                                                 this->GetTensor());
                break;
            default:
            {
                throw armnn::UnimplementedException();
            }
        }
        this->Unmap();
    }

    mutable ClTensorDecorator m_Tensor;
    IClTensorHandle* m_OriginalHandle = nullptr;
};

} // namespace armnn
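
DecorateTensorHandle() is what ties the two decorator classes back to ClTensorHandle: it returns a ClTensorHandleDecorator that shares the handle's underlying CLTensor while presenting the caller-supplied TensorInfo. A minimal sketch of that flow, where originalInfo and overrideInfo are assumed TensorInfo objects describing the same underlying buffer size:

    armnn::ClTensorHandle handle(originalInfo);   // originalInfo: assumed TensorInfo
    handle.Allocate();
    std::shared_ptr<armnn::ITensorHandle> decorated = handle.DecorateTensorHandle(overrideInfo);
    const void* hostPtr = decorated->Map(true);   // same CL buffer, reinterpreted via overrideInfo
    decorated->Unmap();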