//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "GpuFsaTensorHandle.hpp"
#include "GpuFsaTensorHandleFactory.hpp"

namespace armnn
{

using FactoryId = ITensorHandleFactory::FactoryId;

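// Creates a handle that views a sub-region of the parent tensor (e.g. so that concat inputs can be
// represented as sub-tensors of the concat output). Returns nullptr if ACL cannot represent the
// requested sub-tensor.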
std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                            const TensorShape& subTensorShape,
                                                                            const unsigned int* subTensorOrigin) const
{
    arm_compute::Coordinates coords;
    arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
    {
        // Arm compute indexes tensor coords in reverse order.
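        // e.g. a 4D armnn origin {o0, o1, o2, o3} maps to ACL coordinates {o3, o2, o1, o0}.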
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());

    // For ACL to support sub-tensors, the concatenation axis cannot be on x or y, and the x and y
    // dimensions of the sub-tensor must match those of the parent shape.
    if (coords.x() != 0 || coords.y() != 0)
    {
        return nullptr;
    }
    if ((parentShape.x() != shape.x()) || (parentShape.y() != shape.y()))
    {
        return nullptr;
    }

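    // Let ACL check that the requested sub-tensor region is valid for this parent; bail out if it is not.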
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<GpuFsaSubTensorHandle>(PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}

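// The overloads without an IsMemoryManaged argument default to memory-managed allocation.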
std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
{
    return GpuFsaTensorHandleFactory::CreateTensorHandle(tensorInfo, true);
}

std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                             DataLayout dataLayout) const
{
    return GpuFsaTensorHandleFactory::CreateTensorHandle(tensorInfo, dataLayout, true);
}

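// This factory only supports memory-managed tensors: if unmanaged memory is requested, a warning is
// logged and the handle is still registered with the inter-layer memory group.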
std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                             const bool IsMemoryManaged) const
{
    std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo);
    if (!IsMemoryManaged)
    {
        ARMNN_LOG(warning) << "GpuFsaTensorHandleFactory only supports memory-managed tensor handles.";
    }
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    return tensorHandle;
}

std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                             DataLayout dataLayout,
                                                                             const bool IsMemoryManaged) const
{
    std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo, dataLayout);
    if (!IsMemoryManaged)
    {
        ARMNN_LOG(warning) << "GpuFsaTensorHandleFactory only supports memory-managed tensor handles.";
    }
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    return tensorHandle;
}

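// The factory id is created once and shared by every instance of this factory.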
const FactoryId& GpuFsaTensorHandleFactory::GetIdStatic()
{
    static const FactoryId s_Id(GpuFsaTensorHandleFactoryId());
    return s_Id;
}

const FactoryId& GpuFsaTensorHandleFactory::GetId() const
{
    return GetIdStatic();
}

bool GpuFsaTensorHandleFactory::SupportsSubTensors() const
{
    return true;
}

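// Import and export of externally allocated memory are not supported, hence MemorySource::Undefined.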
MemorySourceFlags GpuFsaTensorHandleFactory::GetExportFlags() const
{
    return MemorySourceFlags(MemorySource::Undefined);
}

MemorySourceFlags GpuFsaTensorHandleFactory::GetImportFlags() const
{
    return MemorySourceFlags(MemorySource::Undefined);
}

} // namespace armnn