/*
 * Copyright (c) 2023-2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ACL_ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUSUB_H
#define ACL_ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUSUB_H

#include "arm_compute/core/Error.h"

namespace arm_compute
{
/** Forward declaration */
class ITensorInfo;

namespace experimental
{
namespace dynamic_fusion
{
/** Forward declaration */
class GpuWorkloadContext;
class GpuWorkloadSketch;

/** Operator interface. */
class GpuSub final
{
public:
    /** Create an operator and fuse it into the workload sketch.
     *    @note If @ref validate_op() fails, the creation also fails and may throw an error.
     *    @note If @ref validate_op() fails, @p sketch remains unchanged and valid.
     *
     * Valid data type configurations:
     * |lhs            |rhs            |dst           |
     * |:--------------|:--------------|:-------------|
     * |F16            |F16            |F16           |
     * |F32            |F32            |F32           |
     *
     * Valid data layouts:
     * - Any
     *
     * @param[in,out] sketch Workload sketch into which the operator will be fused
 * @param[in]     lhs    Left hand side tensor info. Data types supported: F16/F32.
 * @param[in]     rhs    Right hand side tensor info. Data types supported: same as @p lhs.
     *
     * @return Pointer for the destination tensor info
     */
    static ITensorInfo *create_op(GpuWorkloadSketch &sketch, ITensorInfo *lhs, ITensorInfo *rhs);
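
    // A minimal usage sketch (illustrative, not part of this header): assuming a
    // GpuWorkloadContext "context" and a GpuWorkloadSketch "sketch" built on it already
    // exist, and that the context exposes create_tensor_info() as elsewhere in the
    // dynamic fusion API, fusing a subtraction could look like:
    //
    //   ITensorInfo *lhs = context.create_tensor_info(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
    //   ITensorInfo *rhs = context.create_tensor_info(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
    //   ITensorInfo *dst = GpuSub::create_op(sketch, lhs, rhs); // dst = lhs - rhs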

    /** Check if the operator configuration is supported, irrespective of fusion
     *
     * @param[in] context Workload context within which the operator is running
 * @param[in] lhs     Left hand side tensor info. Data types supported: F16/F32.
 * @param[in] rhs     Right hand side tensor info. Data types supported: same as @p lhs.
     *
     * @return Status
     */
    static Status is_supported_op(const GpuWorkloadContext &context, const ITensorInfo *lhs, const ITensorInfo *rhs);

    /** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
     *
     * Parameters are similar to @ref GpuSub::create_op()
     *
     * @return Status
     */
    static Status validate_op(const GpuWorkloadSketch &sketch, const ITensorInfo *lhs, const ITensorInfo *rhs);
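
    // Illustrative validation-first pattern (an assumed workflow, not mandated by this
    // header): callers can probe support and fusibility before mutating the sketch.
    //
    //   const Status supported = GpuSub::is_supported_op(context, lhs, rhs);
    //   const Status fusible   = GpuSub::validate_op(sketch, lhs, rhs);
    //   if (supported.error_code() == ErrorCode::OK && fusible.error_code() == ErrorCode::OK)
    //   {
    //       ITensorInfo *dst = GpuSub::create_op(sketch, lhs, rhs);
    //   }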
};
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
#endif // ACL_ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUSUB_H