16 #include <arm_compute/runtime/MemoryGroup.h> 17 #include <arm_compute/runtime/IMemoryGroup.h> 18 #include <arm_compute/runtime/Tensor.h> 19 #include <arm_compute/runtime/SubTensor.h> 20 #include <arm_compute/core/TensorShape.h> 21 #include <arm_compute/core/Coordinates.h> 32 m_IsImportEnabled(false),
// NOTE(review): Doxygen source-listing extraction — the embedded numbers (35, 41, ...) are the
// original file's line numbers, and several original lines between them are missing.
// Fragment of NeonTensorHandle(const TensorInfo& tensorInfo): builds the backing ACL tensor
// from the ArmNN tensor info.
35 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
// Fragment of NeonTensorHandle(const TensorInfo&, DataLayout, MemorySourceFlags): member
// initializer list — import flags stored, import disabled by default. Original lines 42 and
// 44-47 are missing from this view.
41 : m_ImportFlags(importFlags),
43 m_IsImportEnabled(false),
// Builds the ACL tensor honouring the requested data layout.
48 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
51 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
52 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
// Fragment of Allocate() ("Indicate to the memory manager that this resource is no longer
// active" per the interface doc): when memory import is not enabled, the ACL tensor is told
// to allocate its own storage.
// NOTE(review): original lines 58 and 60-65 are missing from this extraction — any
// import-enabled branch is not visible here; confirm against the upstream header.
57 if (!m_IsImportEnabled)
59 armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
// Fragment of Manage(): hands the tensor to the memory group for lifetime management, again
// only when import is disabled. Original lines 67-68 are missing from this extraction.
66 if (!m_IsImportEnabled)
69 m_MemoryGroup->manage(&m_Tensor);
77 return m_Tensor.info()->data_type();
80 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup)
override 82 m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
85 virtual const void*
Map(
bool )
const override 87 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
90 virtual void Unmap()
const override {}
94 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
99 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
104 m_ImportFlags = importFlags;
109 return m_ImportFlags;
114 m_IsImportEnabled = importEnabledFlag;
// Fragment of CanBeImported(void* memory, MemorySource): rejects pointers that are not
// aligned to the element type's alignment (m_TypeAlignment). NOTE(review): most of the
// original body (lines 121-128 etc.) is missing from this extraction.
120 if (reinterpret_cast<uintptr_t>(memory) % m_TypeAlignment)
// Fragment of Import(void* memory, MemorySource source): proceeds only when 'source' is
// among the supported m_ImportFlags.
129 if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
// First-time import path: nothing imported yet and no buffer allocated.
139 if (!m_Imported && !m_Tensor.buffer())
// NOTE(review): 'status' is produced on a missing original line (presumably the ACL
// import_memory call) — confirm against the upstream NeonTensorHandle.hpp.
144 m_Imported = bool(status);
// Importing onto an already allocated tensor is an error (message text on the next line).
153 if (!m_Imported && m_Tensor.buffer())
156 "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
165 m_Imported = bool(status);
// CopyOutTo(void* memory): copy the tensor's contents into a caller-provided buffer,
// switching on the ACL data type so 'memory' is cast to the matching element type before
// armcomputetensorutils::CopyArmComputeITensorData performs the copy.
// NOTE(review): Doxygen-extraction artifact — the switch header (original lines 188-190),
// the 'break;' statements and any default branch are missing from this view; confirm
// against the upstream header rather than assuming fall-through.
187 void CopyOutTo(
void* memory)
const override 191 case arm_compute::DataType::F32:
192 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
193 static_cast<float*>(memory));
// Unsigned 8-bit: plain U8 and asymmetric-quantized QASYMM8 share the same element type.
195 case arm_compute::DataType::U8:
196 case arm_compute::DataType::QASYMM8:
197 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
198 static_cast<uint8_t*>(memory));
// Signed 8-bit quantized variants.
200 case arm_compute::DataType::QSYMM8:
201 case arm_compute::DataType::QASYMM8_SIGNED:
202 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
203 static_cast<int8_t*>(memory));
// 16-bit floating-point flavours map onto ArmNN's BFloat16 / Half wrappers.
205 case arm_compute::DataType::BFLOAT16:
206 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
207 static_cast<armnn::BFloat16*>(memory));
209 case arm_compute::DataType::F16:
210 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
211 static_cast<armnn::Half*>(memory));
// 16-bit integer: S16 and symmetric-quantized QSYMM16.
213 case arm_compute::DataType::S16:
214 case arm_compute::DataType::QSYMM16:
215 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
216 static_cast<int16_t*>(memory));
218 case arm_compute::DataType::S32:
219 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
220 static_cast<int32_t*>(memory));
// CopyInFrom(const void* memory): copy from a caller-provided buffer into this tensor,
// switching on the ACL data type so the source pointer is cast to the matching element type.
// NOTE(review): extraction artifact — the switch header, the destination argument
// (presumably this->GetTensor()) of each call, the 'break;' statements and any default
// branch sit on original lines missing from this view; confirm against the upstream header.
230 void CopyInFrom(
const void* memory)
override 234 case arm_compute::DataType::F32:
235 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
238 case arm_compute::DataType::U8:
239 case arm_compute::DataType::QASYMM8:
240 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
243 case arm_compute::DataType::QSYMM8:
244 case arm_compute::DataType::QASYMM8_SIGNED:
245 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
248 case arm_compute::DataType::BFLOAT16:
249 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
252 case arm_compute::DataType::F16:
253 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
256 case arm_compute::DataType::S16:
257 case arm_compute::DataType::QSYMM16:
258 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
261 case arm_compute::DataType::S32:
262 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
// Data members of NeonTensorHandle. NOTE(review): original lines 274-275 (presumably
// m_ImportFlags and m_Imported, which are referenced above) are missing from this extraction.
272 arm_compute::Tensor m_Tensor;
273 std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
// True when this handle wraps externally imported memory instead of ACL-owned storage.
276 bool m_IsImportEnabled;
// Required pointer alignment for imported buffers, checked in CanBeImported().
277 const uintptr_t m_TypeAlignment;
// Fragment of NeonSubTensorHandle(IAclTensorHandle* parent, const arm_compute::TensorShape&
// shape, const arm_compute::Coordinates& coords): constructs an arm_compute::SubTensor that
// views the parent's tensor at the given coordinates and remembers the parent handle.
// NOTE(review): original lines 283, 285, 287 and the closing brace are missing here.
284 const arm_compute::TensorShape& shape,
286 : m_Tensor(&parent->
GetTensor(), shape, coords)
288 parentHandle = parent;
291 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
292 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
301 return m_Tensor.info()->data_type();
304 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>&)
override {}
306 virtual const void*
Map(
bool )
const override 308 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
310 virtual void Unmap()
const override {}
314 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
319 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
// CopyOutTo(void* memory): copy the sub-tensor's contents into a caller-provided buffer,
// switching on the ACL data type. Note the narrower type list than the parent handle
// (no BFLOAT16/F16 cases are visible here).
// NOTE(review): extraction artifact — the switch header (original lines 325-327), the
// 'break;' statements and any default branch are missing from this view.
324 void CopyOutTo(
void* memory)
const override 328 case arm_compute::DataType::F32:
329 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
330 static_cast<float*>(memory));
332 case arm_compute::DataType::U8:
333 case arm_compute::DataType::QASYMM8:
334 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
335 static_cast<uint8_t*>(memory));
337 case arm_compute::DataType::QSYMM8:
338 case arm_compute::DataType::QASYMM8_SIGNED:
339 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
340 static_cast<int8_t*>(memory));
342 case arm_compute::DataType::S16:
343 case arm_compute::DataType::QSYMM16:
344 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
345 static_cast<int16_t*>(memory));
347 case arm_compute::DataType::S32:
348 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
349 static_cast<int32_t*>(memory));
// CopyInFrom(const void* memory): copy from a caller-provided buffer into this sub-tensor,
// switching on the ACL data type.
// NOTE(review): extraction artifact — the switch header, the destination argument
// (presumably this->GetTensor()) of each call, the 'break;' statements and any default
// branch sit on original lines missing from this view.
359 void CopyInFrom(
const void* memory)
override 363 case arm_compute::DataType::F32:
364 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
367 case arm_compute::DataType::U8:
368 case arm_compute::DataType::QASYMM8:
369 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
372 case arm_compute::DataType::QSYMM8:
373 case arm_compute::DataType::QASYMM8_SIGNED:
374 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
377 case arm_compute::DataType::S16:
378 case arm_compute::DataType::QSYMM16:
379 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
382 case arm_compute::DataType::S32:
383 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
393 arm_compute::SubTensor m_Tensor;
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
bool CanBeImported(void *memory, MemorySource source) override
Implementations must determine if this memory block can be imported.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
virtual arm_compute::DataType GetDataType() const override
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
virtual arm_compute::DataType GetDataType() const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual void Unmap() const override
Unmap the tensor data.
arm_compute::ITensor const & GetTensor() const override
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
unsigned int MemorySourceFlags
arm_compute::ITensor const & GetTensor() const override
virtual void Unmap() const override
Unmap the tensor data.
Copyright (c) 2021 ARM Limited and Contributors.
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
void IgnoreUnused(Ts &&...)
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
#define ARMNN_ASSERT(COND)
arm_compute::ITensor & GetTensor() override
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual const void * Map(bool) const override
Map the tensor data for access.
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
MemorySource
Define the Memory Source to reduce copies.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
NeonTensorHandle(const TensorInfo &tensorInfo)
virtual const void * Map(bool) const override
Map the tensor data for access.
void SetImportFlags(MemorySourceFlags importFlags)
arm_compute::ITensor & GetTensor() override
void SetImportEnabledFlag(bool importEnabledFlag)
constexpr unsigned int GetDataTypeSize(DataType dataType)
virtual void Manage() override
Indicate to the memory manager that this resource is active.