14 #include <arm_compute/runtime/CL/CLTensor.h> 15 #include <arm_compute/runtime/CL/CLSubTensor.h> 16 #include <arm_compute/runtime/IMemoryGroup.h> 17 #include <arm_compute/runtime/MemoryGroup.h> 18 #include <arm_compute/core/TensorShape.h> 19 #include <arm_compute/core/Coordinates.h> 28 virtual arm_compute::ICLTensor&
GetTensor() = 0;
29 virtual arm_compute::ICLTensor
const&
GetTensor()
const = 0;
31 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) = 0;
40 m_IsImportEnabled(false)
42 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
48 : m_ImportFlags(importFlags),
50 m_IsImportEnabled(false)
52 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
55 arm_compute::CLTensor&
GetTensor()
override {
return m_Tensor; }
56 arm_compute::CLTensor
const&
GetTensor()
const override {
return m_Tensor; }
60 if (m_IsImportEnabled)
66 armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
74 if (m_IsImportEnabled)
80 assert(m_MemoryGroup !=
nullptr);
81 m_MemoryGroup->manage(&m_Tensor);
85 virtual const void*
Map(
bool blocking =
true)
const override 87 const_cast<arm_compute::CLTensor*
>(&m_Tensor)->map(blocking);
88 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
91 virtual void Unmap()
const override {
const_cast<arm_compute::CLTensor*
>(&m_Tensor)->unmap(); }
97 return m_Tensor.info()->data_type();
100 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup)
override 102 m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
107 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
112 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
117 m_ImportFlags = importFlags;
122 return m_ImportFlags;
127 m_IsImportEnabled = importEnabledFlag;
133 if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
143 void CopyOutTo(
void* memory)
const override 148 case arm_compute::DataType::F32:
149 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
150 static_cast<float*>(memory));
152 case arm_compute::DataType::U8:
153 case arm_compute::DataType::QASYMM8:
154 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
155 static_cast<uint8_t*>(memory));
157 case arm_compute::DataType::QSYMM8:
158 case arm_compute::DataType::QSYMM8_PER_CHANNEL:
159 case arm_compute::DataType::QASYMM8_SIGNED:
160 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
161 static_cast<int8_t*>(memory));
163 case arm_compute::DataType::F16:
164 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
165 static_cast<armnn::Half*>(memory));
167 case arm_compute::DataType::S16:
168 case arm_compute::DataType::QSYMM16:
169 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
170 static_cast<int16_t*>(memory));
172 case arm_compute::DataType::S32:
173 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
174 static_cast<int32_t*>(memory));
190 case arm_compute::DataType::F32:
191 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
194 case arm_compute::DataType::U8:
195 case arm_compute::DataType::QASYMM8:
196 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
199 case arm_compute::DataType::F16:
200 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
203 case arm_compute::DataType::S16:
204 case arm_compute::DataType::QSYMM8:
205 case arm_compute::DataType::QSYMM8_PER_CHANNEL:
206 case arm_compute::DataType::QASYMM8_SIGNED:
207 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
210 case arm_compute::DataType::QSYMM16:
211 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
214 case arm_compute::DataType::S32:
215 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
226 arm_compute::CLTensor m_Tensor;
227 std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
230 bool m_IsImportEnabled;
237 const arm_compute::TensorShape& shape,
239 : m_Tensor(&parent->
GetTensor(), shape, coords)
241 parentHandle = parent;
244 arm_compute::CLSubTensor&
GetTensor()
override {
return m_Tensor; }
245 arm_compute::CLSubTensor
const&
GetTensor()
const override {
return m_Tensor; }
250 virtual const void*
Map(
bool blocking =
true)
const override 252 const_cast<arm_compute::CLSubTensor*
>(&m_Tensor)->map(blocking);
253 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
255 virtual void Unmap()
const override {
const_cast<arm_compute::CLSubTensor*
>(&m_Tensor)->unmap(); }
261 return m_Tensor.info()->data_type();
264 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>&)
override {}
268 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
273 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
278 void CopyOutTo(
void* memory)
const override 283 case arm_compute::DataType::F32:
284 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
285 static_cast<float*>(memory));
287 case arm_compute::DataType::U8:
288 case arm_compute::DataType::QASYMM8:
289 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
290 static_cast<uint8_t*>(memory));
292 case arm_compute::DataType::F16:
293 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
294 static_cast<armnn::Half*>(memory));
296 case arm_compute::DataType::QSYMM8:
297 case arm_compute::DataType::QSYMM8_PER_CHANNEL:
298 case arm_compute::DataType::QASYMM8_SIGNED:
299 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
300 static_cast<int8_t*>(memory));
302 case arm_compute::DataType::S16:
303 case arm_compute::DataType::QSYMM16:
304 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
305 static_cast<int16_t*>(memory));
307 case arm_compute::DataType::S32:
308 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
309 static_cast<int32_t*>(memory));
325 case arm_compute::DataType::F32:
326 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
329 case arm_compute::DataType::U8:
330 case arm_compute::DataType::QASYMM8:
331 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
334 case arm_compute::DataType::F16:
335 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
338 case arm_compute::DataType::QSYMM8:
339 case arm_compute::DataType::QSYMM8_PER_CHANNEL:
340 case arm_compute::DataType::QASYMM8_SIGNED:
341 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
344 case arm_compute::DataType::S16:
345 case arm_compute::DataType::QSYMM16:
346 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
349 case arm_compute::DataType::S32:
350 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
361 mutable arm_compute::CLSubTensor m_Tensor;
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup)=0
virtual arm_compute::DataType GetDataType() const =0
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual arm_compute::DataType GetDataType() const override
void SetImportFlags(MemorySourceFlags importFlags)
unsigned int MemorySourceFlags
virtual const void * Map(bool blocking=true) const override
Map the tensor data for access.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
void SetImportEnabledFlag(bool importEnabledFlag)
virtual void Unmap() const override
Unmap the tensor data.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
arm_compute::CLSubTensor const & GetTensor() const override
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
ClSubTensorHandle(IClTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
virtual arm_compute::DataType GetDataType() const override
arm_compute::CLTensor const & GetTensor() const override
arm_compute::CLTensor & GetTensor() override
ClTensorHandle(const TensorInfo &tensorInfo)
virtual void CopyOutTo(void *memory) const =0
Testing support to be able to verify and set tensor data content.
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
virtual const void * Map(bool blocking=true) const override
Map the tensor data for access.
virtual void Unmap() const =0
Unmap the tensor data.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
MemorySource
Define the Memory Source to reduce copies.
virtual void CopyInFrom(const void *memory)=0
ClTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Undefined))
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
arm_compute::CLSubTensor & GetTensor() override
virtual arm_compute::ICLTensor & GetTensor()=0
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
virtual void Unmap() const override
Unmap the tensor data.
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.