aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/BatchMatMulImpl.hpp
blob: 19971a4af329aaeb98422904c06b9d17f4d44190 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Encoders.hpp"
#include "Decoders.hpp"

#include <armnn/backends/WorkloadData.hpp>

namespace armnn
{

/// Reference-backend helper implementing the BatchMatMul operation on
/// float-decoded tensor data. Only the declaration lives here; all method
/// bodies are defined in the corresponding .cpp file, so behavior notes
/// below are inferred from signatures and should be confirmed there.
class BatchMatMul {
public:
    /// Constructs the helper with everything needed to run the operation.
    /// NOTE(review): judging by the absence of any public "run" method, the
    /// constructor itself presumably performs the whole computation - confirm
    /// against the .cpp.
    ///
    /// @param params        Operation descriptor (transpose/adjoint options etc.).
    ///                      Held by reference (see member below) - the caller's
    ///                      descriptor must outlive this object.
    /// @param inputXInfo    Shape/type info of the first input (stored by value).
    /// @param inputYInfo    Shape/type info of the second input (stored by value).
    /// @param outputInfo    Shape/type info of the output (stored by value).
    /// @param inputXDecoder Decoder yielding the first input's elements as float.
    /// @param inputYDecoder Decoder yielding the second input's elements as float.
    /// @param outputEncoder Encoder receiving the computed float output elements.
    ///                      Decoders/encoder are held by reference - they must
    ///                      outlive this object as well.
    BatchMatMul(const BatchMatMulDescriptor& params,
                const TensorInfo& inputXInfo,
                const TensorInfo& inputYInfo,
                const TensorInfo& outputInfo,
                Decoder<float>& inputXDecoder,
                Decoder<float>& inputYDecoder,
                Encoder<float>& outputEncoder);

private:
    // Identifies which of the three tensors a helper method operates on.
    enum DataSlot
    {
        InputX = 0,
        InputY = 1,
        Output = 2
    };

    // Reference member: binds to the caller-owned descriptor passed to the
    // constructor (dangling risk if the descriptor dies first - see ctor docs).
    const BatchMatMulDescriptor& params;
    TensorInfo inputXInfo;
    TensorInfo inputYInfo;
    TensorInfo outputInfo;
    Decoder<float>& inputXDecoder;
    Decoder<float>& inputYDecoder;
    Encoder<float>& outputEncoder;

    // Working copies of the decoded input elements; presumably filled by the
    // constructor/ApplyParams and consumed by GetValueAt - confirm in .cpp.
    std::vector<float> inputXData;
    std::vector<float> inputYData;

    // Performs the batched matrix multiplication itself (defined in .cpp).
    void ApplyBatchMatMul();

    // Applies the descriptor's per-input options (e.g. transpose/adjoint)
    // before multiplying - TODO confirm exact behavior in .cpp.
    void ApplyParams();

    // Transposes the tensor held in the given slot (in place, presumably).
    void Transpose(DataSlot type);

    // Replaces the tensor in the given slot with its adjoint (adjugate).
    void Adjoint(DataSlot type);

    // Recursively visits every index combination of tensorInfo's shape,
    // invoking `operation` with the full index vector at each leaf.
    // `curIdx`/`curDim` carry the recursion state; callers start at dim 0.
    void RecurseTensor(const TensorInfo& tensorInfo,
                       std::function<void(const std::vector<unsigned int>&)> const& operation,
                       std::vector<unsigned int>& curIdx,
                       unsigned int curDim);

    // Adjusts it for when input tensors are of unequal rank
    void AdjustAxesToMulForUnequalRanks(std::pair<unsigned int, unsigned int>& axesXToMul,
                                        std::pair<unsigned int, unsigned int>& axesYToMul);

    // Reads one element of the slot's tensor at the multidimensional index.
    // `customData`, when non-empty, presumably overrides the slot's stored
    // data as the read source - confirm in .cpp.
    float GetValueAt(DataSlot type, std::vector<unsigned int> idx, const std::vector<float>& customData = {});

    // Writes one element of the slot's tensor at the multidimensional index.
    void SetValueAt(float value, DataSlot type, std::vector<unsigned int> idx);

    // Takes into account broadcasting: clamps/maps `idx` to a valid index
    // for the slot's actual (possibly smaller, broadcast) shape.
    void AdjustToSafeIdx(DataSlot type, std::vector<unsigned int>& idx);

    // Converts a multidimensional index into the flat (linear) offset for
    // the slot's tensor.
    unsigned int CalcFlatIdx(DataSlot type, const std::vector<unsigned int>& idx);
};

} // namespace armnn