12 #include <arm_compute/runtime/MemoryGroup.h> 13 #include <arm_compute/runtime/IMemoryGroup.h> 14 #include <arm_compute/runtime/Tensor.h> 15 #include <arm_compute/runtime/SubTensor.h> 16 #include <arm_compute/core/TensorShape.h> 17 #include <arm_compute/core/Coordinates.h> 19 #include <boost/polymorphic_pointer_cast.hpp> 30 m_IsImportEnabled(
false)
32 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
38 : m_ImportFlags(importFlags),
40 m_IsImportEnabled(
false)
43 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
46 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
47 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
52 if (!m_IsImportEnabled)
54 armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
61 if (!m_IsImportEnabled)
63 BOOST_ASSERT(m_MemoryGroup !=
nullptr);
64 m_MemoryGroup->manage(&m_Tensor);
72 return m_Tensor.info()->data_type();
75 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup)
override 77 m_MemoryGroup = boost::polymorphic_pointer_downcast<arm_compute::MemoryGroup>(memoryGroup);
80 virtual const void*
Map(
bool )
const override 82 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
85 virtual void Unmap()
const override {}
89 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
94 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
99 m_ImportFlags = importFlags;
104 return m_ImportFlags;
109 m_IsImportEnabled = importEnabledFlag;
114 if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
119 constexpr uintptr_t alignment =
sizeof(size_t);
120 if (reinterpret_cast<uintptr_t>(memory) % alignment)
126 if (!m_Imported && !m_Tensor.buffer())
131 m_Imported = bool(status);
140 if (!m_Imported && m_Tensor.buffer())
143 "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
152 m_Imported = bool(status);
166 void CopyOutTo(
void* memory)
const override 170 case arm_compute::DataType::F32:
171 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
172 static_cast<float*>(memory));
174 case arm_compute::DataType::U8:
175 case arm_compute::DataType::QASYMM8:
176 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
177 static_cast<uint8_t*>(memory));
179 case arm_compute::DataType::F16:
180 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
181 static_cast<armnn::Half*>(memory));
183 case arm_compute::DataType::S16:
184 case arm_compute::DataType::QSYMM16:
185 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
186 static_cast<int16_t*>(memory));
188 case arm_compute::DataType::S32:
189 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
190 static_cast<int32_t*>(memory));
200 void CopyInFrom(
const void* memory)
override 204 case arm_compute::DataType::F32:
205 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
208 case arm_compute::DataType::U8:
209 case arm_compute::DataType::QASYMM8:
210 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
213 case arm_compute::DataType::F16:
214 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
217 case arm_compute::DataType::S16:
218 case arm_compute::DataType::QSYMM16:
219 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
222 case arm_compute::DataType::S32:
223 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
233 arm_compute::Tensor m_Tensor;
234 std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
237 bool m_IsImportEnabled;
244 const arm_compute::TensorShape& shape,
246 : m_Tensor(&parent->
GetTensor(), shape, coords)
248 parentHandle = parent;
251 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
252 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
261 return m_Tensor.info()->data_type();
264 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>&)
override {}
266 virtual const void*
Map(
bool )
const override 268 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
270 virtual void Unmap()
const override {}
274 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
279 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
284 void CopyOutTo(
void* memory)
const override 288 case arm_compute::DataType::F32:
289 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
290 static_cast<float*>(memory));
292 case arm_compute::DataType::U8:
293 case arm_compute::DataType::QASYMM8:
294 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
295 static_cast<uint8_t*>(memory));
297 case arm_compute::DataType::S16:
298 case arm_compute::DataType::QSYMM16:
299 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
300 static_cast<int16_t*>(memory));
302 case arm_compute::DataType::S32:
303 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
304 static_cast<int32_t*>(memory));
314 void CopyInFrom(
const void* memory)
override 318 case arm_compute::DataType::F32:
319 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
322 case arm_compute::DataType::U8:
323 case arm_compute::DataType::QASYMM8:
324 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
327 case arm_compute::DataType::S16:
328 case arm_compute::DataType::QSYMM16:
329 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
332 case arm_compute::DataType::S32:
333 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
343 arm_compute::SubTensor m_Tensor;
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
virtual arm_compute::DataType GetDataType() const override
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
virtual arm_compute::DataType GetDataType() const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual void Unmap() const override
Unmap the tensor data.
arm_compute::ITensor const & GetTensor() const override
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
unsigned int MemorySourceFlags
arm_compute::ITensor const & GetTensor() const override
virtual void Unmap() const override
Unmap the tensor data.
Copyright (c) 2020 ARM Limited.
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
arm_compute::ITensor & GetTensor() override
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
virtual const void * Map(bool) const override
Map the tensor data for access.
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
NeonTensorHandle(const TensorInfo &tensorInfo)
virtual const void * Map(bool) const override
Map the tensor data for access.
void SetImportFlags(MemorySourceFlags importFlags)
arm_compute::ITensor & GetTensor() override
void SetImportEnabledFlag(bool importEnabledFlag)
virtual void Manage() override
Indicate to the memory manager that this resource is active.