// NOTE(review): this file is a garbled extraction of armnn's NeonTensorHandle.hpp
// (Doxygen-rendered source). Embedded numbers such as "16", "32" are original
// line numbers, and many original lines are missing; comments below annotate the
// visible fragments rather than guessing at the missing text.
// Fragment: #include block plus the tail of NeonTensorHandle(const TensorInfo&).
// Visible: the initializer list defaults m_IsImportEnabled to false, and the body
// builds the backing arm_compute tensor from the armnn TensorInfo.
16 #include <arm_compute/runtime/MemoryGroup.h> 17 #include <arm_compute/runtime/IMemoryGroup.h> 18 #include <arm_compute/runtime/Tensor.h> 19 #include <arm_compute/runtime/SubTensor.h> 20 #include <arm_compute/core/TensorShape.h> 21 #include <arm_compute/core/Coordinates.h> 32 m_IsImportEnabled(
false)
34 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
// Fragment: NeonTensorHandle(const TensorInfo&, DataLayout, MemorySourceFlags).
// Visible: stores the caller-supplied import flags, defaults m_IsImportEnabled to
// false, and builds the tensor with an explicit data layout.
40 : m_ImportFlags(importFlags),
42 m_IsImportEnabled(
false)
45 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
48 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
49 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
// Fragment: Allocate() — when memory import is NOT enabled, ask ACL to allocate
// backing storage for the tensor; the surrounding braces are missing from this
// extraction.
54 if (!m_IsImportEnabled)
56 armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
// Fragment: Manage() — when import is not enabled, hand lifetime management of
// the tensor to the memory group. m_MemoryGroup is presumably non-null by the
// time this runs — any guard line is not visible here; TODO confirm against the
// full source.
63 if (!m_IsImportEnabled)
66 m_MemoryGroup->manage(&m_Tensor);
// Fragment: GetDataType() body — forwards the ACL tensor-info data type.
74 return m_Tensor.info()->data_type();
77 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup)
override 79 m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
82 virtual const void*
Map(
bool )
const override 84 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
87 virtual void Unmap()
const override {}
// Fragment: GetStrides() body — converts the ACL per-dimension byte strides.
91 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
// Fragment: GetShape() body — converts the ACL tensor shape to armnn's TensorShape.
96 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
// Fragment: SetImportFlags(MemorySourceFlags) body.
101 m_ImportFlags = importFlags;
// Fragment: GetImportFlags() body.
106 return m_ImportFlags;
// Fragment: SetImportEnabledFlag(bool) body.
111 m_IsImportEnabled = importEnabledFlag;
// Fragment: Import(void* memory, MemorySource source) — many lines are missing
// from this extraction. Visible logic: proceed only if `source` is among the
// supported m_ImportFlags; require the pointer to be size_t-aligned; attempt the
// import only when the tensor has no buffer yet, and raise an error when trying
// to import onto an already-allocated tensor. The actual ACL import call and the
// error-return paths are not visible — TODO confirm against the full header.
116 if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
121 constexpr uintptr_t alignment =
sizeof(size_t);
122 if (reinterpret_cast<uintptr_t>(memory) % alignment)
128 if (!m_Imported && !m_Tensor.buffer())
133 m_Imported = bool(status);
142 if (!m_Imported && m_Tensor.buffer())
145 "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
154 m_Imported = bool(status);
// Fragment: CopyOutTo(void* memory) — copies the tensor's contents out to a raw
// buffer, dispatching on the ACL data type to choose the element type for the
// typed copy. The enclosing switch statement, the break statements, and the
// default (unsupported type) case are not visible in this extraction.
176 void CopyOutTo(
void* memory)
const override 180 case arm_compute::DataType::F32:
181 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
182 static_cast<float*>(memory));
184 case arm_compute::DataType::U8:
185 case arm_compute::DataType::QASYMM8:
186 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
187 static_cast<uint8_t*>(memory));
189 case arm_compute::DataType::QASYMM8_SIGNED:
190 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
191 static_cast<int8_t*>(memory));
193 case arm_compute::DataType::BFLOAT16:
194 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
195 static_cast<armnn::BFloat16*>(memory));
197 case arm_compute::DataType::F16:
198 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
199 static_cast<armnn::Half*>(memory));
201 case arm_compute::DataType::S16:
202 case arm_compute::DataType::QSYMM16:
203 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
204 static_cast<int16_t*>(memory));
206 case arm_compute::DataType::S32:
207 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
208 static_cast<int32_t*>(memory));
// Fragment: CopyInFrom(const void* memory) — mirror of CopyOutTo: copies raw
// buffer contents into the tensor, dispatching on the ACL data type. The
// destination argument of each call (presumably this->GetTensor()), the switch
// header, break statements, and default case are missing from this extraction.
218 void CopyInFrom(
const void* memory)
override 222 case arm_compute::DataType::F32:
223 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
226 case arm_compute::DataType::U8:
227 case arm_compute::DataType::QASYMM8:
228 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
231 case arm_compute::DataType::QASYMM8_SIGNED:
232 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
235 case arm_compute::DataType::BFLOAT16:
236 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
239 case arm_compute::DataType::F16:
240 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
243 case arm_compute::DataType::S16:
244 case arm_compute::DataType::QSYMM16:
245 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
248 case arm_compute::DataType::S32:
249 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
// Data members of NeonTensorHandle. Declarations of m_Imported and m_ImportFlags
// are referenced above but not visible in this extraction.
// The owned ACL tensor backing this handle.
259 arm_compute::Tensor m_Tensor;
// Memory group that manages m_Tensor's lifetime when import is disabled.
260 std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
// True when the handle should use externally imported memory instead of
// letting the memory group allocate.
263 bool m_IsImportEnabled;
// Fragment: NeonSubTensorHandle(parent, shape, coords) constructor — constructs
// the ACL SubTensor as a view over the parent handle's tensor at the given shape
// and coordinates, and remembers the parent (presumably so GetParent() can
// return it; the member declaration is not visible here).
270 const arm_compute::TensorShape& shape,
272 : m_Tensor(&parent->
GetTensor(), shape, coords)
274 parentHandle = parent;
277 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
278 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
// Fragment: NeonSubTensorHandle::GetDataType() body — forwards the ACL data type.
287 return m_Tensor.info()->data_type();
290 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>&)
override {}
292 virtual const void*
Map(
bool )
const override 294 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
296 virtual void Unmap()
const override {}
// Fragment: NeonSubTensorHandle::GetStrides() body.
300 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
// Fragment: NeonSubTensorHandle::GetShape() body.
305 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
// Fragment: CopyOutTo — as in NeonTensorHandle but without BFLOAT16/F16 cases
// in the visible text; the switch header, break statements, and default case
// are missing from this extraction.
310 void CopyOutTo(
void* memory)
const override 314 case arm_compute::DataType::F32:
315 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
316 static_cast<float*>(memory));
318 case arm_compute::DataType::U8:
319 case arm_compute::DataType::QASYMM8:
320 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
321 static_cast<uint8_t*>(memory));
323 case arm_compute::DataType::QASYMM8_SIGNED:
324 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
325 static_cast<int8_t*>(memory));
327 case arm_compute::DataType::S16:
328 case arm_compute::DataType::QSYMM16:
329 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
330 static_cast<int16_t*>(memory));
332 case arm_compute::DataType::S32:
333 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
334 static_cast<int32_t*>(memory));
// Fragment: CopyInFrom — mirror of CopyOutTo for the sub-tensor; destination
// argument of each call, switch header, breaks and default case not visible.
344 void CopyInFrom(
const void* memory)
override 348 case arm_compute::DataType::F32:
349 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
352 case arm_compute::DataType::U8:
353 case arm_compute::DataType::QASYMM8:
354 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
357 case arm_compute::DataType::QASYMM8_SIGNED:
358 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
361 case arm_compute::DataType::S16:
362 case arm_compute::DataType::QSYMM16:
363 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
366 case arm_compute::DataType::S32:
367 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
// The wrapped ACL sub-tensor: a view into the parent tensor's storage.
377 arm_compute::SubTensor m_Tensor;
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
virtual arm_compute::DataType GetDataType() const override
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
virtual arm_compute::DataType GetDataType() const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual void Unmap() const override
Unmap the tensor data.
arm_compute::ITensor const & GetTensor() const override
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
unsigned int MemorySourceFlags
arm_compute::ITensor const & GetTensor() const override
virtual void Unmap() const override
Unmap the tensor data.
Copyright (c) 2021 ARM Limited and Contributors.
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
#define ARMNN_ASSERT(COND)
arm_compute::ITensor & GetTensor() override
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual const void * Map(bool) const override
Map the tensor data for access.
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
NeonTensorHandle(const TensorInfo &tensorInfo)
virtual const void * Map(bool) const override
Map the tensor data for access.
void SetImportFlags(MemorySourceFlags importFlags)
arm_compute::ITensor & GetTensor() override
void SetImportEnabledFlag(bool importEnabledFlag)
virtual void Manage() override
Indicate to the memory manager that this resource is active.