// NOTE(review): garbled extraction — the embedded numbers (16..21, 32, 34) are the
// original header's line numbers, not code. This span fuses the Arm Compute Library
// #include block with the tail of NeonTensorHandle(const TensorInfo& tensorInfo):
// the member-init list ends with m_IsImportEnabled(false), and the constructor body
// builds the wrapped ACL tensor from the ArmNN TensorInfo (presumably metadata-only,
// no allocation — confirm against armcomputetensorutils::BuildArmComputeTensor).
16 #include <arm_compute/runtime/MemoryGroup.h> 17 #include <arm_compute/runtime/IMemoryGroup.h> 18 #include <arm_compute/runtime/Tensor.h> 19 #include <arm_compute/runtime/SubTensor.h> 20 #include <arm_compute/core/TensorShape.h> 21 #include <arm_compute/core/Coordinates.h> 32 m_IsImportEnabled(
false)
34 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
// Fragment of NeonTensorHandle(const TensorInfo&, DataLayout, MemorySourceFlags)
// (full signature visible in the tooltip text further down): initialises the import
// flags from the caller, defaults import to disabled, and builds the ACL tensor with
// an explicit data layout. The signature line itself was lost in extraction.
40 : m_ImportFlags(importFlags),
42 m_IsImportEnabled(
false)
45 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
48 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
49 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
// Fragment of Allocate(): when memory import is NOT enabled, ask ACL to allocate
// backing storage for the (so far empty) tensor. The method header and any
// import-enabled branch were lost in extraction — presumably the import path skips
// allocation entirely; confirm against the full header upstream.
54 if (!m_IsImportEnabled)
56 armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
// Fragment of Manage(): registers the tensor with the lifetime-managing memory
// group, but only when import is disabled — imported memory is externally owned and
// must not be managed by the group. Method header lost in extraction.
63 if (!m_IsImportEnabled)
66 m_MemoryGroup->manage(&m_Tensor);
74 return m_Tensor.info()->data_type();
77 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup)
override 79 m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
82 virtual const void*
Map(
bool )
const override 84 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
87 virtual void Unmap()
const override {}
91 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
96 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
// Bodies of three one-line accessors whose headers were lost in extraction
// (names confirmed by the tooltip text below):
//   SetImportFlags(MemorySourceFlags)  — record which memory sources may be imported.
101 m_ImportFlags = importFlags;
//   GetImportFlags() const             — report the supported import sources.
106 return m_ImportFlags;
//   SetImportEnabledFlag(bool)         — toggle whether Allocate()/Manage() defer to imported memory.
111 m_IsImportEnabled = importEnabledFlag;
// Fragment of Import(void* memory, MemorySource source): imports externally
// allocated memory as the tensor's backing store. Visible logic:
//  - the requested source must be permitted by m_ImportFlags;
//  - the pointer must be aligned to sizeof(size_t) bytes;
//  - import is only attempted when not already imported and the tensor has no
//    allocated buffer; importing over an allocated tensor throws (error text at 145);
//  - m_Imported records whether the (not-visible) ACL import call succeeded.
// The `status` computation, throw statements and return were lost in extraction —
// consult the full header before relying on exact semantics.
116 if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
121 constexpr uintptr_t alignment =
sizeof(size_t);
122 if (reinterpret_cast<uintptr_t>(memory) % alignment)
128 if (!m_Imported && !m_Tensor.buffer())
133 m_Imported = bool(status);
142 if (!m_Imported && m_Tensor.buffer())
145 "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
154 m_Imported = bool(status);
// Fragment of CopyOutTo(void* memory) const: copies the tensor's contents out to
// caller-supplied memory, dispatching on the ACL data type so the element copy is
// done with the correct C++ type. The switch header, break statements and default
// case were lost in extraction. Supported types visible here: F32, U8/QASYMM8,
// QASYMM8_SIGNED, BFLOAT16, F16, S16/QSYMM16, S32.
168 void CopyOutTo(
void* memory)
const override 172 case arm_compute::DataType::F32:
173 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
174 static_cast<float*>(memory));
176 case arm_compute::DataType::U8:
177 case arm_compute::DataType::QASYMM8:
178 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
179 static_cast<uint8_t*>(memory));
181 case arm_compute::DataType::QASYMM8_SIGNED:
182 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
183 static_cast<int8_t*>(memory));
185 case arm_compute::DataType::BFLOAT16:
186 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
187 static_cast<armnn::BFloat16*>(memory));
189 case arm_compute::DataType::F16:
190 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
191 static_cast<armnn::Half*>(memory));
193 case arm_compute::DataType::S16:
194 case arm_compute::DataType::QSYMM16:
195 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
196 static_cast<int16_t*>(memory));
198 case arm_compute::DataType::S32:
199 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
200 static_cast<int32_t*>(memory));
// Fragment of CopyInFrom(const void* memory): mirror of CopyOutTo — copies
// caller-supplied memory INTO the tensor, dispatching on the ACL data type.
// The switch header, the destination argument of each call (presumably
// this->GetTensor()), break statements and default case were lost in extraction.
210 void CopyInFrom(
const void* memory)
override 214 case arm_compute::DataType::F32:
215 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
218 case arm_compute::DataType::U8:
219 case arm_compute::DataType::QASYMM8:
220 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
223 case arm_compute::DataType::QASYMM8_SIGNED:
224 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
227 case arm_compute::DataType::BFLOAT16:
228 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
231 case arm_compute::DataType::F16:
232 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
235 case arm_compute::DataType::S16:
236 case arm_compute::DataType::QSYMM16:
237 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
240 case arm_compute::DataType::S32:
241 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
// Data members of NeonTensorHandle (m_ImportFlags and m_Imported declarations were
// lost in extraction — both are assigned elsewhere in this class):
// The owned ACL tensor this handle wraps.
251 arm_compute::Tensor m_Tensor;
// Memory group that manages m_Tensor's lifetime when import is disabled.
252 std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
// When true, Allocate()/Manage() skip ACL allocation/management in favour of
// externally imported memory.
255 bool m_IsImportEnabled;
// Fragment of NeonSubTensorHandle(IAclTensorHandle* parent,
//     const arm_compute::TensorShape& shape, const arm_compute::Coordinates& coords)
// (full signature visible in the tooltip text below): constructs an ACL SubTensor
// that views the parent's tensor at the given coordinates/shape — no storage of its
// own — and remembers the parent handle.
262 const arm_compute::TensorShape& shape,
264 : m_Tensor(&parent->
GetTensor(), shape, coords)
266 parentHandle = parent;
269 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
270 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
279 return m_Tensor.info()->data_type();
282 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>&)
override {}
284 virtual const void*
Map(
bool )
const override 286 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
288 virtual void Unmap()
const override {}
// Bodies of NeonSubTensorHandle::GetStrides()/GetShape() (headers lost in
// extraction): convert the ACL byte strides / tensor shape into ArmNN TensorShapes.
292 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
297 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
// Fragment of NeonSubTensorHandle::CopyOutTo(void*): type-dispatched copy of the
// sub-tensor's contents into caller memory. Switch header, breaks and default case
// lost in extraction.
// NOTE(review): unlike NeonTensorHandle::CopyOutTo above, no BFLOAT16/F16 cases are
// visible here — confirm upstream whether they were dropped by extraction or are
// genuinely unsupported for sub-tensors.
302 void CopyOutTo(
void* memory)
const override 306 case arm_compute::DataType::F32:
307 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
308 static_cast<float*>(memory));
310 case arm_compute::DataType::U8:
311 case arm_compute::DataType::QASYMM8:
312 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
313 static_cast<uint8_t*>(memory));
315 case arm_compute::DataType::QASYMM8_SIGNED:
316 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
317 static_cast<int8_t*>(memory));
319 case arm_compute::DataType::S16:
320 case arm_compute::DataType::QSYMM16:
321 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
322 static_cast<int16_t*>(memory));
324 case arm_compute::DataType::S32:
325 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
326 static_cast<int32_t*>(memory));
// Fragment of NeonSubTensorHandle::CopyInFrom(const void*): type-dispatched copy of
// caller memory into the sub-tensor. Switch header, the destination argument of
// each call (presumably this->GetTensor()), breaks and default case were lost in
// extraction. Same visible type set as CopyOutTo directly above.
336 void CopyInFrom(
const void* memory)
override 340 case arm_compute::DataType::F32:
341 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
344 case arm_compute::DataType::U8:
345 case arm_compute::DataType::QASYMM8:
346 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
349 case arm_compute::DataType::QASYMM8_SIGNED:
350 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
353 case arm_compute::DataType::S16:
354 case arm_compute::DataType::QSYMM16:
355 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
358 case arm_compute::DataType::S32:
359 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
369 arm_compute::SubTensor m_Tensor;
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
virtual arm_compute::DataType GetDataType() const override
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
virtual arm_compute::DataType GetDataType() const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual void Unmap() const override
Unmap the tensor data.
arm_compute::ITensor const & GetTensor() const override
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
unsigned int MemorySourceFlags
arm_compute::ITensor const & GetTensor() const override
virtual void Unmap() const override
Unmap the tensor data.
Copyright (c) 2020 ARM Limited.
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
#define ARMNN_ASSERT(COND)
arm_compute::ITensor & GetTensor() override
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual const void * Map(bool) const override
Map the tensor data for access.
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
NeonTensorHandle(const TensorInfo &tensorInfo)
virtual const void * Map(bool) const override
Map the tensor data for access.
void SetImportFlags(MemorySourceFlags importFlags)
arm_compute::ITensor & GetTensor() override
void SetImportEnabledFlag(bool importEnabledFlag)
virtual void Manage() override
Indicate to the memory manager that this resource is active.