/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
#define ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H

#include "arm_compute/core/Error.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/common/Macros.h"
#include "src/core/gpu/cl/ClCompileContext.h"
#include "src/core/gpu/cl/IClKernel.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
/** Interface for max, shifting, exponentiating and summing the logits */
class ClLogits1DMaxShiftExpSumKernel : public IClKernel
{
    /**< Grid size (obtained through auto-tuning) */
    static const unsigned int _grid_size;
    /**< Vector size in the serial case (obtained through auto-tuning) */
    static const unsigned int _serial_vector_size;
    /**< Vector size in the parallel case (obtained through auto-tuning; enables the best memory access pattern for Bifrost). */
    static const unsigned int _parallel_vector_size;

public:
    /** Info for whether a parallel reduction will be run and the vector size of the execution. */
    using ParallelReductionInfo = std::tuple<bool, unsigned int>;

    /** Default constructor */
    ClLogits1DMaxShiftExpSumKernel() = default;
    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DMaxShiftExpSumKernel);
    /** Configure the kernel using the given information about tensors
     *
     * @param[in]     compile_context The compile context to be used.
     * @param[in]     src             Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
     * @param[in,out] max             Max values tensor. Data types supported: same as @p src
     * @param[out]    dst             Destination tensor. Data types supported: same as @p src
     * @param[out]    sum             Sum of 1D logits tensor. Data types supported: same as @p src
     * @param[in]     info            Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const CLCompileContext &compile_context, const ITensorInfo &src, ITensorInfo &max, ITensorInfo &dst, ITensorInfo &sum, const SoftmaxKernelInfo &info);
    /** Static function to check if the given info will lead to a valid configuration of @ref ClLogits1DMaxShiftExpSumKernel
     *
     * @param[in] src Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
     * @param[in] max Max values tensor. Data types supported: same as @p src
     * @param[in] dst Destination tensor. Data types supported: same as @p src
     * @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p src
     *
     * @return a status
     */
    static Status validate(const ITensorInfo &src, const ITensorInfo &max, const ITensorInfo &dst, const ITensorInfo &sum);
    /** Checks if the given size is eligible for parallel reduction
     *
     * @note  Serial reduction is launched for width < (_grid_size * _serial_vector_size).
     * @note  Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
     *
     * @param[in] size Size to check
     *
     * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
     *         while the second element is the vector size of the execution.
     */
    static ParallelReductionInfo is_parallel_reduction(size_t size);
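
    // Illustrative use of the value returned above (a hedged sketch of the documented rule only;
    // `width` is a hypothetical row length, and _grid_size / _serial_vector_size are the
    // auto-tuned constants declared at the top of this class):
    //
    //   const auto         info        = ClLogits1DMaxShiftExpSumKernel::is_parallel_reduction(width);
    //   const bool         is_parallel = std::get<0>(info); // width >= _grid_size * _serial_vector_size
    //   const unsigned int vector_size = std::get<1>(info); // forced to 4 in the parallel case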

    // Inherited methods overridden:
    void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
};

/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
class ClLogits1DNormKernel : public IClKernel
{
public:
    /** Default constructor */
    ClLogits1DNormKernel() = default;
    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DNormKernel);

    /** Set the input and output tensors.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  src             Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
     * @param[in]  sum             Sum tensor. Dimensions should be dim(src) - 1. Data types supported: same as @p src
     * @param[out] dst             Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p src, or same as @p src
     * @param[in]  info            Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const CLCompileContext &compile_context, const ITensorInfo &src, const ITensorInfo &sum, ITensorInfo &dst, const SoftmaxKernelInfo &info);
    /** Static function to check if the given info will lead to a valid configuration of @ref ClLogits1DNormKernel
     *
     * @param[in] src  Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
     * @param[in] sum  Sum tensor. Dimensions should be dim(src) - 1. Data types supported: same as @p src
     * @param[in] dst  Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p src, or same as @p src
     * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo &src, const ITensorInfo &sum, const ITensorInfo &dst, const SoftmaxKernelInfo &info);

    // Inherited methods overridden:
    void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
};
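
/* Illustrative two-pass configuration of these kernels (a hedged sketch, not the library's
 * reference flow; compile_context, the tensor infos src/max/tmp/sum/dst and the contents of
 * SoftmaxKernelInfo below are assumed placeholders):
 *
 *   ClLogits1DMaxShiftExpSumKernel max_shift_exp_sum;
 *   ClLogits1DNormKernel           norm;
 *   SoftmaxKernelInfo              info{};
 *
 *   // First pass: per-row maximum, shifted exponentials and their sum.
 *   ARM_COMPUTE_ERROR_THROW_ON(ClLogits1DMaxShiftExpSumKernel::validate(src, max, tmp, sum));
 *   max_shift_exp_sum.configure(compile_context, src, max, tmp, sum, info);
 *
 *   // Second pass: multiply each exponentiated logit by the inverse of its row sum.
 *   ARM_COMPUTE_ERROR_THROW_ON(ClLogits1DNormKernel::validate(tmp, sum, dst, info));
 *   norm.configure(compile_context, tmp, sum, dst, info);
 */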
} // namespace kernels
} // namespace opencl
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H */