9 #include <boost/core/ignore_unused.hpp>
18 const unsigned int* subTensorOrigin)
21 const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
29 coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
32 const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
33 if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
38 return std::make_unique<NeonSubTensorHandle>(
39 boost::polymorphic_downcast<IAclTensorHandle*>(&parent), shape, coords);
54 const bool IsMemoryManaged) const
56 auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
59 tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
62 tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
70 const bool IsMemoryManaged) const
72 auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
75 tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
78 tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
unsigned int GetNumDimensions() const
static const FactoryId & GetIdStatic()
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, const TensorShape &subTensorShape, const unsigned int *subTensorOrigin) const override
virtual TensorShape GetShape() const = 0
constexpr const char * NeonTensorHandleFactoryId()
ITensorHandleFactory::FactoryId FactoryId
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
unsigned int MemorySourceFlags
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const override
bool SupportsSubTensors() const override
const FactoryId & GetId() const override
MemorySourceFlags GetImportFlags() const override
MemorySourceFlags GetExportFlags() const override