From a1b1e41bb261f5613f443fed7071936a360686ed Mon Sep 17 00:00:00 2001 From: Mohammed Suhail Munshi Date: Thu, 23 Mar 2023 22:21:31 +0000 Subject: Implement MatMul Function and Operator with Floating Point support for CPU - Implements MatMul function and operator for floating point datatype FP16/FP32 - Includes support for transposing dynamic tensors prior to matrix multiplication. - Adds tests for 2D/3D/4D+ tensors in MatMul with F32/F16 datatype (with all combinations of transposed/not-transposed tensors) - Updates fixture to allow for testing fused activation in MatMul - Adds tests for matmul with and without fused activation Resolved: [COMPMID-5898] Signed-off-by: Mohammed Suhail Munshi Change-Id: Iefa84b26dd723c9a51e6c3f91023152c6c31ace2 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9411 Reviewed-by: SiCong Li Tested-by: Arm Jenkins Benchmark: Arm Jenkins --- arm_compute/core/ITensorInfo.h | 3 + arm_compute/runtime/CL/functions/CLMatMul.h | 12 ++- arm_compute/runtime/NEON/NEFunctions.h | 1 + arm_compute/runtime/NEON/functions/NEMatMul.h | 111 ++++++++++++++++++++++++++ 4 files changed, 125 insertions(+), 2 deletions(-) create mode 100644 arm_compute/runtime/NEON/functions/NEMatMul.h (limited to 'arm_compute') diff --git a/arm_compute/core/ITensorInfo.h b/arm_compute/core/ITensorInfo.h index 1382649e74..7b0fd1c2b7 100644 --- a/arm_compute/core/ITensorInfo.h +++ b/arm_compute/core/ITensorInfo.h @@ -36,6 +36,9 @@ namespace arm_compute { +// Note: Any changes to the fields of the class below that have setters should be mirrored +// (if possible) in the auto_init_if_empty function in AutoConfiguration.h + /** Store the tensor's metadata */ class ITensorInfo : public misc::ICloneable { diff --git a/arm_compute/runtime/CL/functions/CLMatMul.h b/arm_compute/runtime/CL/functions/CLMatMul.h index 56dd9c5655..712bac06bf 100644 --- a/arm_compute/runtime/CL/functions/CLMatMul.h +++ b/arm_compute/runtime/CL/functions/CLMatMul.h @@ -35,6 +35,13 @@ class 
ITensorInfo; class MatMulInfo; class Status; +/** Settings for MatMul OpenCL implementation */ +class GpuMatMulSettings +{ +public: + /* Placeholder for operator parity between CPU/GPU */ +}; + /** Basic function to execute MatMul (Matrix Multiplication) on OpenCL */ class CLMatMul : public IFunction { @@ -73,13 +80,14 @@ public: * @param[in] rhs RHS input tensor (Matrix B). Data type supported: same as @p lhs. * @param[out] output Output tensor. Data type supported: same as @p lhs. * @param[in] matmul_info Attributes for MatMul + * @param[in] settings Class containing flags for function level settings */ - void configure(const CLCompileContext &compile_context, ICLTensor *rhs, ICLTensor *lhs, ICLTensor *output, const MatMulInfo &matmul_info); + void configure(const CLCompileContext &compile_context, ICLTensor *rhs, ICLTensor *lhs, ICLTensor *output, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings = GpuMatMulSettings{}); /** Initialise the kernel's inputs and output * * Similar to @ref CLMatMul::configure() */ - void configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info); + void configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings = GpuMatMulSettings{}); /** Static function to check if given info will lead to a valid configuration of @ref CLMatMul. 
* * Similar to @ref CLMatMul::configure() diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h index 8dbe6c59c3..836cba7699 100644 --- a/arm_compute/runtime/NEON/NEFunctions.h +++ b/arm_compute/runtime/NEON/NEFunctions.h @@ -74,6 +74,7 @@ #include "arm_compute/runtime/NEON/functions/NELSTMLayer.h" #include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h" #include "arm_compute/runtime/NEON/functions/NELogical.h" +#include "arm_compute/runtime/NEON/functions/NEMatMul.h" #include "arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h" #include "arm_compute/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.h" #include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h" diff --git a/arm_compute/runtime/NEON/functions/NEMatMul.h b/arm_compute/runtime/NEON/functions/NEMatMul.h new file mode 100644 index 0000000000..0f3e3adacc --- /dev/null +++ b/arm_compute/runtime/NEON/functions/NEMatMul.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL +#define ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL + +#include "arm_compute/runtime/IFunction.h" +#include <memory> + +namespace arm_compute +{ +/** Settings for MatMul Cpu implementation*/ +class CpuMatMulSettings +{ +public: + // get fast math flag + bool fast_math() const + { + return _fast_math; + } + // Set fast math flag + CpuMatMulSettings &fast_math(bool fmath) + { + _fast_math = fmath; + return *this; + }; + +private: + bool _fast_math{ false }; +}; + +// Forward declarations +class ITensor; +class ITensorInfo; +class MatMulInfo; +class Status; + +/** Basic function to run the following operators: + * + * -# @ref cpu::CpuMatMul + */ +class NEMatMul : public IFunction +{ +public: + /** Constructor */ + NEMatMul(); + /** Destructor */ + ~NEMatMul(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEMatMul(const NEMatMul &) = delete; + /** Default move constructor */ + NEMatMul(NEMatMul &&) = default; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEMatMul &operator=(const NEMatMul &) = delete; + /** Default move assignment operator */ + NEMatMul &operator=(NEMatMul &&) = default; + /** Initialize + * + * Valid data layouts: + * - Any + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:------------------|:--------------| + * |F32 |F32 |F32 | + * |F16 |F16 |F16 | + * + * @param[in] lhs Input source tensor. + * @param[in] rhs Input source tensor. + * @param[out] output Output tensor. 
Data type supported: same as @p lhs/rhs + * @param[in] info Class containing flags to transpose lhs/rhs + * @param[in] settings Class containing flags for function level settings i.e fast math + */ + void configure(ITensor *lhs, ITensor *rhs, ITensor *output, const MatMulInfo &info, const CpuMatMulSettings &settings); + /** Static function to check if given info will lead to a valid configuration of @ref NEMatMul + * + * Parameters are similar to @ref NEMatMul::configure() + * + * @return Status + */ + static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulInfo &info, const CpuMatMulSettings &settings); + + // Inherited methods overridden + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; +}; +} +#endif /* ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL */ -- cgit v1.2.1