path: root/src/backends/gpuFsa/GpuFsaWorkloadFactory.hpp
//
// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <aclCommon/BaseMemoryManager.hpp>

#include <armnn/Optional.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <arm_compute/core/CL/CLCompileContext.h>

namespace armnn
{

// Forward declaration of the GpuFsa backend's memory manager, used by the
// memory-managed constructor below.
class GpuFsaMemoryManager;

// Dynamic Fusion workload factory.
class GpuFsaWorkloadFactory : public IWorkloadFactory
{
public:
    explicit GpuFsaWorkloadFactory(const std::shared_ptr<GpuFsaMemoryManager>& memoryManager);
    GpuFsaWorkloadFactory();

    ~GpuFsaWorkloadFactory() = default;

    const BackendId& GetBackendId() const override;

    static bool IsLayerSupported(const Layer& layer,
                                 Optional<DataType> dataType,
                                 std::string& outReasonIfUnsupported);

    bool SupportsSubTensors() const override { return false; }

    ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
    std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& /*parent*/,
                                                         TensorShape const& /*subTensorShape*/,
                                                         unsigned int const* /*subTensorOrigin*/) const override
    {
        return nullptr;
    }

    ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                      const bool IsMemoryManaged = true) const override;

    ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                      DataLayout dataLayout,
                                                      const bool IsMemoryManaged = true) const override;

    /// Initialises m_CLCompileContext with the default OpenCL context and device.
    void InitializeCLCompileContext();

    std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
                                              const QueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;

private:
    template <typename QueueDescriptorType>
    std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const;

    mutable std::shared_ptr<GpuFsaMemoryManager> m_MemoryManager;
    arm_compute::CLCompileContext m_CLCompileContext;
};

} // namespace armnn
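
Example usage (a minimal sketch, not taken from the Arm NN sources: it assumes the include paths of a typical Arm NN build, picks illustrative tensor shapes, and uses LayerType::PreCompiled only to show the shape of the CreateWorkload() call declared above; with an empty descriptor the factory is not expected to produce a usable workload):

// Sketch only: illustrative shapes and an empty PreCompiledQueueDescriptor.
#include "GpuFsaWorkloadFactory.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadInfo.hpp>

#include <memory>

std::unique_ptr<armnn::IWorkload> MakePreCompiledWorkloadSketch()
{
    using namespace armnn;

    // Default-construct the factory and build its CL compile context.
    GpuFsaWorkloadFactory factory;
    factory.InitializeCLCompileContext();

    // Describe one input and one output tensor (shapes are illustrative).
    TensorInfo inputInfo({1, 16}, DataType::Float32);
    TensorInfo outputInfo({1, 16}, DataType::Float32);

    WorkloadInfo info;
    info.m_InputTensorInfos.push_back(inputInfo);
    info.m_OutputTensorInfos.push_back(outputInfo);

    // An empty descriptor: in a real flow this would carry the fused
    // (pre-compiled) object produced by the Dynamic Fusion front end,
    // so the factory may return nullptr (or reject the request) here.
    PreCompiledQueueDescriptor descriptor;

    return factory.CreateWorkload(LayerType::PreCompiled, descriptor, info);
}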