aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/WorkingMemHandle.hpp
blob: cef6fb6fd39867e3ab1f3ebd9c418e787fe9f084 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Layer.hpp"
#include "Network.hpp"
#include "WorkingMemDescriptor.hpp"

#include <armnn/IWorkingMemHandle.hpp>
#include <armnn/Tensor.hpp>

#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

namespace armnn
{

namespace experimental
{

/// Holds the intermediate tensor memory required to execute a loaded network.
/// Callers synchronize via GetMutex(); every member function below assumes the
/// mutex is already held unless stated otherwise.
class WorkingMemHandle final : public IWorkingMemHandle
{

public:
    /// @param networkId Id of the network this working memory belongs to.
    /// @param workingMemDescriptors Descriptors stored in the same order as the
    ///        workloads of the topologically sorted graph.
    /// @param workingMemDescriptorMap Lookup from layer guid to its descriptor.
    WorkingMemHandle(NetworkId networkId,
                     std::vector<WorkingMemDescriptor> workingMemDescriptors,
                     std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap);

    ~WorkingMemHandle()
    { FreeWorkingMemory(); }

    /// Returns the id of the network this working memory belongs to.
    NetworkId GetNetworkId() override
    {
        return m_NetworkId;
    }

    /// Allocate the backing memory required for execution. If this is not called, then allocation will be
    /// deferred to execution time. The mutex must be locked.
    void Allocate() override
    {
        if (m_IsAllocated)
        {
            return;
        }
        m_IsAllocated = true;

        // Iterate through all WorkingMemDescriptors calling allocate() on each input and output in turn.
        // Iterate by reference: copying a descriptor would duplicate its input/output handle vectors.
        for (auto& workingMemDescriptor : m_WorkingMemDescriptors)
        {
            for (auto& input : workingMemDescriptor.m_Inputs)
            {
                input->Allocate();
            }
            for (auto& output : workingMemDescriptor.m_Outputs)
            {
                output->Allocate();
            }
        }
    }

    /// Free the backing memory required for execution. The mutex must be locked.
    void Free() override
    {
        if (!m_IsAllocated)
        {
            return;
        }
        m_IsAllocated = false;

        // Iterate through all WorkingMemDescriptors calling Unmap() on each input and output in turn.
        // Iterate by reference for the same reason as in Allocate().
        for (auto& workingMemDescriptor : m_WorkingMemDescriptors)
        {
            for (auto& input : workingMemDescriptor.m_Inputs)
            {
                input->Unmap();
            }
            for (auto& output : workingMemDescriptor.m_Outputs)
            {
                output->Unmap();
            }
        }
    }

    /// IsAllocated returns true if the backing memory is currently allocated. The mutex must be locked.
    bool IsAllocated() override
    {
        return m_IsAllocated;
    }

    /// Get a mutex which can be used for synchronizing access to the WorkingMemHandle object.
    std::mutex& GetMutex() override
    {
        return m_Mutex;
    }

    /// Get the WorkingMemDescriptor for a Layer. The mutex must be locked.
    WorkingMemDescriptor& GetWorkingMemDescriptor(LayerGuid id) override
    {
        auto result = m_WorkingMemDescriptorMap.find(id);
        // A lookup miss here means the graph and the map are out of sync - a programmer error.
        ARMNN_ASSERT(result != m_WorkingMemDescriptorMap.end());
        return result->second;
    }

    /// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
    /// the Workloads in a topologically sorted graph. The mutex must be locked.
    WorkingMemDescriptor& GetWorkingMemDescriptorAt(unsigned int id) override
    {
        return m_WorkingMemDescriptors[id];
    }

private:
    // Releases any memory still held; called from the destructor (defined out of line).
    void FreeWorkingMemory();

    NetworkId m_NetworkId;
    std::shared_ptr<ProfilerImpl> m_Profiler;

    std::vector<WorkingMemDescriptor> m_WorkingMemDescriptors;
    std::unordered_map<LayerGuid, WorkingMemDescriptor> m_WorkingMemDescriptorMap;
    // Default to false so Allocate()/Free() never read an indeterminate value,
    // even if a constructor omits it from its init list.
    bool m_IsAllocated = false;
    std::mutex m_Mutex;
};

} // end experimental namespace

} // end armnn namespace