16 #include <arm_compute/runtime/MemoryGroup.h> 17 #include <arm_compute/runtime/IMemoryGroup.h> 18 #include <arm_compute/runtime/Tensor.h> 19 #include <arm_compute/runtime/SubTensor.h> 20 #include <arm_compute/core/TensorShape.h> 21 #include <arm_compute/core/Coordinates.h> 32 m_IsImportEnabled(false),
35 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
41 : m_ImportFlags(importFlags),
43 m_IsImportEnabled(false),
48 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
51 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
52 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
57 if (!m_IsImportEnabled)
59 armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
66 if (!m_IsImportEnabled)
69 m_MemoryGroup->manage(&m_Tensor);
77 return m_Tensor.info()->data_type();
80 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup)
override 82 m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
85 virtual const void*
Map(
bool )
const override 87 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
90 virtual void Unmap()
const override {}
94 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
99 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
104 m_ImportFlags = importFlags;
109 return m_ImportFlags;
114 m_IsImportEnabled = importEnabledFlag;
119 if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
123 if (reinterpret_cast<uintptr_t>(memory) % m_TypeAlignment)
129 if (!m_Imported && !m_Tensor.buffer())
134 m_Imported = bool(status);
143 if (!m_Imported && m_Tensor.buffer())
146 "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
155 m_Imported = bool(status);
177 void CopyOutTo(
void* memory)
const override 181 case arm_compute::DataType::F32:
182 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
183 static_cast<float*>(memory));
185 case arm_compute::DataType::U8:
186 case arm_compute::DataType::QASYMM8:
187 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
188 static_cast<uint8_t*>(memory));
190 case arm_compute::DataType::QSYMM8:
191 case arm_compute::DataType::QASYMM8_SIGNED:
192 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
193 static_cast<int8_t*>(memory));
195 case arm_compute::DataType::BFLOAT16:
196 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
197 static_cast<armnn::BFloat16*>(memory));
199 case arm_compute::DataType::F16:
200 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
201 static_cast<armnn::Half*>(memory));
203 case arm_compute::DataType::S16:
204 case arm_compute::DataType::QSYMM16:
205 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
206 static_cast<int16_t*>(memory));
208 case arm_compute::DataType::S32:
209 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
210 static_cast<int32_t*>(memory));
220 void CopyInFrom(
const void* memory)
override 224 case arm_compute::DataType::F32:
225 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
228 case arm_compute::DataType::U8:
229 case arm_compute::DataType::QASYMM8:
230 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
233 case arm_compute::DataType::QSYMM8:
234 case arm_compute::DataType::QASYMM8_SIGNED:
235 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
238 case arm_compute::DataType::BFLOAT16:
239 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
242 case arm_compute::DataType::F16:
243 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
246 case arm_compute::DataType::S16:
247 case arm_compute::DataType::QSYMM16:
248 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
251 case arm_compute::DataType::S32:
252 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
262 arm_compute::Tensor m_Tensor;
263 std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
266 bool m_IsImportEnabled;
267 const uintptr_t m_TypeAlignment;
274 const arm_compute::TensorShape& shape,
276 : m_Tensor(&parent->
GetTensor(), shape, coords)
278 parentHandle = parent;
281 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
282 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
291 return m_Tensor.info()->data_type();
294 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>&)
override {}
296 virtual const void*
Map(
bool )
const override 298 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
300 virtual void Unmap()
const override {}
304 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
309 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
314 void CopyOutTo(
void* memory)
const override 318 case arm_compute::DataType::F32:
319 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
320 static_cast<float*>(memory));
322 case arm_compute::DataType::U8:
323 case arm_compute::DataType::QASYMM8:
324 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
325 static_cast<uint8_t*>(memory));
327 case arm_compute::DataType::QSYMM8:
328 case arm_compute::DataType::QASYMM8_SIGNED:
329 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
330 static_cast<int8_t*>(memory));
332 case arm_compute::DataType::S16:
333 case arm_compute::DataType::QSYMM16:
334 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
335 static_cast<int16_t*>(memory));
337 case arm_compute::DataType::S32:
338 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
339 static_cast<int32_t*>(memory));
349 void CopyInFrom(
const void* memory)
override 353 case arm_compute::DataType::F32:
354 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
357 case arm_compute::DataType::U8:
358 case arm_compute::DataType::QASYMM8:
359 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
362 case arm_compute::DataType::QSYMM8:
363 case arm_compute::DataType::QASYMM8_SIGNED:
364 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
367 case arm_compute::DataType::S16:
368 case arm_compute::DataType::QSYMM16:
369 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
372 case arm_compute::DataType::S32:
373 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
383 arm_compute::SubTensor m_Tensor;
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
virtual arm_compute::DataType GetDataType() const override
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
virtual arm_compute::DataType GetDataType() const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual void Unmap() const override
Unmap the tensor data.
arm_compute::ITensor const & GetTensor() const override
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
unsigned int MemorySourceFlags
arm_compute::ITensor const & GetTensor() const override
virtual void Unmap() const override
Unmap the tensor data.
Copyright (c) 2021 ARM Limited and Contributors.
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
#define ARMNN_ASSERT(COND)
arm_compute::ITensor & GetTensor() override
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual const void * Map(bool) const override
Map the tensor data for access.
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
MemorySource
Define the Memory Source to reduce copies.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
NeonTensorHandle(const TensorInfo &tensorInfo)
virtual const void * Map(bool) const override
Map the tensor data for access.
void SetImportFlags(MemorySourceFlags importFlags)
arm_compute::ITensor & GetTensor() override
void SetImportEnabledFlag(bool importEnabledFlag)
constexpr unsigned int GetDataTypeSize(DataType dataType)
virtual void Manage() override
Indicate to the memory manager that this resource is active.