diff options
author: Ramy Elgammal <ramy.elgammal@arm.com>  2023-03-24 11:42:03 +0000
committer: Ramy Elgammal <ramy.elgammal@arm.com>  2023-04-03 14:57:16 +0000
commit: f26ea2f8cc957a1e6faf0361dea805fb2e236061 (patch)
tree: ed8acee5615236a1638445d3743230ea7a59c8f5 /src/gpu/cl
parent: fff9a4cb56d3d3dbfe85db555eea4bc9b3143996 (diff)
download: ComputeLibrary-f26ea2f8cc957a1e6faf0361dea805fb2e236061.tar.gz
Implement MatMul Function
Resolves: COMPMID-5949
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: Idd8cfe6ea94a14f0b23178f6781251b5f0955563
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9390
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/gpu/cl')
-rw-r--r--  src/gpu/cl/kernels/ClNativeMatMulKernel.h |  4
-rw-r--r--  src/gpu/cl/operators/ClMatMul.cpp         | 80
-rw-r--r--  src/gpu/cl/operators/ClMatMul.h           | 84
3 files changed, 165 insertions(+), 3 deletions(-)
diff --git a/src/gpu/cl/kernels/ClNativeMatMulKernel.h b/src/gpu/cl/kernels/ClNativeMatMulKernel.h index 021292a4ae..3d0f18ec84 100644 --- a/src/gpu/cl/kernels/ClNativeMatMulKernel.h +++ b/src/gpu/cl/kernels/ClNativeMatMulKernel.h @@ -24,8 +24,6 @@ #ifndef ACL_SRC_GPU_CL_KERNELS_CLNATIVEMATMULKERNEL #define ACL_SRC_GPU_CL_KERNELS_CLNATIVEMATMULKERNEL -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibrary.h" #include "arm_compute/core/KernelDescriptors.h" #include "src/core/common/Macros.h" #include "src/gpu/cl/ClCompileContext.h" @@ -65,7 +63,7 @@ public: void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; private: - bool _export_rhs_to_cl_image { false }; + bool _export_rhs_to_cl_image{ false }; }; } // namespace kernels } // namespace opencl diff --git a/src/gpu/cl/operators/ClMatMul.cpp b/src/gpu/cl/operators/ClMatMul.cpp new file mode 100644 index 0000000000..dadaa1f779 --- /dev/null +++ b/src/gpu/cl/operators/ClMatMul.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "src/gpu/cl/operators/ClMatMul.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/runtime/CL/CLScheduler.h" +#include "src/common/utils/Log.h" +#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h" + +namespace arm_compute +{ +namespace opencl +{ +using namespace arm_compute::opencl::kernels; +ClMatMul::ClMatMul() + : _native_matmul_kernel(std::make_unique<ClNativeMatMulKernel>()) +{ +} +ClMatMul::~ClMatMul() +{ +} +Status ClMatMul::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulInfo &matmul_info) +{ + MatMulKernelInfo kernel_info; + kernel_info.adj_lhs = matmul_info.adj_lhs(); + kernel_info.adj_rhs = matmul_info.adj_rhs(); + return ClNativeMatMulKernel::validate(lhs, rhs, output, kernel_info); +} +void ClMatMul::configure(const CLCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulInfo &matmul_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output, matmul_info); + + // Perform validation step + ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, output, matmul_info)); + const GPUTarget gpu_target = CLScheduler::get().target(); + + // Placeholder: Getting the heuristics calculated values for M0, N0, K0, and whether to export RHS to texture pipe + + // Filling the MatMul Kernel info + MatMulKernelInfo kernel_info; + kernel_info.adj_lhs = matmul_info.adj_lhs(); + kernel_info.adj_rhs = matmul_info.adj_rhs(); + kernel_info.m0 = 1; // to be properly calculated from heuristics + kernel_info.n0 = 4; // to be properly calculated from heuristics + kernel_info.k0 = 4; // to be properly calculated from heuristics + 
kernel_info.export_rhs_to_cl_image = false; // to be properly determined from heuristics + + // Set the target for the kernels + _native_matmul_kernel->set_target(gpu_target); + + // Configure the native matrix multiply kernel + _native_matmul_kernel->configure(compile_context, lhs, rhs, output, kernel_info); +} +void ClMatMul::run(ITensorPack &tensors) +{ + CLScheduler::get().enqueue_op(*_native_matmul_kernel, tensors, true); +} +} // namespace opencl +} // namespace arm_compute diff --git a/src/gpu/cl/operators/ClMatMul.h b/src/gpu/cl/operators/ClMatMul.h new file mode 100644 index 0000000000..894b8d5816 --- /dev/null +++ b/src/gpu/cl/operators/ClMatMul.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_SRC_GPU_CL_OPERATORS_ClMatMul +#define ARM_COMPUTE_SRC_GPU_CL_OPERATORS_ClMatMul + +#include "src/gpu/cl/IClOperator.h" +#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h" +#include <memory> + +namespace arm_compute +{ +namespace opencl +{ +/** Basic operator to execute BatchMatMul on OpenCL. This operator calls the following OpenCL kernels: + * + * -# @ref kernels::ClNativeMatMulKernel + */ +class ClMatMul : public IClOperator +{ +public: + /** Constructor */ + ClMatMul(); + ~ClMatMul(); + /** Initialise the kernel's inputs and output + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |lhs |rhs |output | + * |:------------|:------------|:------------| + * |F32 |F32 |F32 | + * |F16 |F16 |F16 | + * + * @note BatchMatMul: Batched Matrix Multiply - [A * B], Multiplies all slices (slice is an element of a batch) of Tensors A and B + * and stores the result in the dst tensor of the same batch size. + * Batch here is number of slices from A and B multiplied at a time, do not confuse with the batch dimension 'N' of NHWC/NCHW + * For NHWC for example: the batch is the higher dimensions H * N, and in general it is H * all higher dimensions. + * @note All tensors must have the same data type. + * + * @param[in] compile_context The compile context to be used. + * @param[in] lhs LHS input tensor info (Matrix A). Data types supported: F16/F32 + * @param[in] rhs RHS input tensor info (Matrix B). Data types supported: same as @p lhs. + * @param[out] output Output tensor info. 
Data types supported: same as @p lhs + * @param[in] matmul_info Attributes for MatMul + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulInfo &matmul_info); + /** Static function to check if given info will lead to a valid configuration + * + * Similar to @ref ClMatMul::configure() + * + * @return a status + */ + static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulInfo &matmul_info); + // Inherited methods overridden: + void run(ITensorPack &tensors) override; + +private: + std::unique_ptr<kernels::ClNativeMatMulKernel> _native_matmul_kernel; +}; +} // namespace opencl +} // namespace arm_compute +#endif // ARM_COMPUTE_SRC_GPU_CL_OPERATORS_ClMatMul |