author    Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>  2023-03-23 22:21:31 +0000
committer Mohmun02 <MohammedSuhail.Munshi@arm.com>                2023-04-13 09:24:52 +0000
commit    a1b1e41bb261f5613f443fed7071936a360686ed (patch)
tree      eff2978a682fb24c8078df9c6c796fde51074255
parent    8b7f42aa0e76a65a4ffa46ee875df6a6220695ae (diff)
Implement MatMul Function and Operator with Floating Point support for CPU
- Implements MatMul function and operator for floating point datatypes FP16/FP32
- Includes support for transposing dynamic tensors prior to matrix multiplication.
- Adds tests for 2D/3D/4D+ tensors in MatMul with F32/F16 datatype (with all combinations of transposed/not-transposed tensors)
- Updates fixture to allow for testing fused activation in MatMul
- Adds tests for matmul with and without fused activation

Resolved: [COMPMID-5898]

Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Change-Id: Iefa84b26dd723c9a51e6c3f91023152c6c31ace2
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9411
Reviewed-by: SiCong Li <sicong.li@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
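For reference, a minimal usage sketch of the new NEMatMul function added by this patch. The shapes, the fused RELU activation and the fill step are illustrative; the includes and calls mirror those used by the new NEON validation tests below.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shapes: lhs [K=3, M=4], rhs [N=5, K=3], dst [N=5, M=4]
    Tensor lhs, rhs, dst;
    lhs.allocator()->init(TensorInfo(TensorShape(3U, 4U), 1, DataType::F32));
    rhs.allocator()->init(TensorInfo(TensorShape(5U, 3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(5U, 4U), 1, DataType::F32));

    // MatMul only accepts dynamic (non-constant) inputs
    lhs.info()->set_are_values_constant(false);
    rhs.info()->set_are_values_constant(false);

    // No transposition, fused RELU; fast math left at its default (false)
    MatMulInfo info;
    info.adj_lhs(false).adj_rhs(false).fused_activation(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    CpuMatMulSettings settings;

    NEMatMul matmul;
    matmul.configure(&lhs, &rhs, &dst, info, settings);

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill lhs and rhs with data ...
    matmul.run();
    return 0;
}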
-rw-r--r--   Android.bp                                              2
-rw-r--r--   arm_compute/core/ITensorInfo.h                          3
-rw-r--r--   arm_compute/runtime/CL/functions/CLMatMul.h            12
-rw-r--r--   arm_compute/runtime/NEON/NEFunctions.h                  1
-rw-r--r--   arm_compute/runtime/NEON/functions/NEMatMul.h         111
-rw-r--r--   filelist.json                                          10
-rw-r--r--   src/BUILD.bazel                                         4
-rw-r--r--   src/CMakeLists.txt                                      6
-rw-r--r--   src/core/helpers/AutoConfiguration.h                   19
-rw-r--r--   src/cpu/operators/CpuMatMul.cpp                       226
-rw-r--r--   src/cpu/operators/CpuMatMul.h                         115
-rw-r--r--   src/cpu/operators/internal/CpuGemmAssemblyDispatch.h   34
-rw-r--r--   src/runtime/CL/functions/CLMatMul.cpp                   6
-rw-r--r--   src/runtime/NEON/functions/NEMatMul.cpp                75
-rw-r--r--   tests/validation/CL/MatMul.cpp                          3
-rw-r--r--   tests/validation/NEON/MatMul.cpp                      205
-rw-r--r--   tests/validation/fixtures/MatMulFixture.h             147
-rw-r--r--   utils/TypePrinter.h                                    30
18 files changed, 957 insertions, 52 deletions
diff --git a/Android.bp b/Android.bp
index a08bab6aac..e38ea65d55 100644
--- a/Android.bp
+++ b/Android.bp
@@ -585,6 +585,7 @@ cc_library_static {
"src/cpu/operators/CpuGemmDirectConv2d.cpp",
"src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp",
"src/cpu/operators/CpuGemmLowpOutputStage.cpp",
+ "src/cpu/operators/CpuMatMul.cpp",
"src/cpu/operators/CpuMaxUnpooling.cpp",
"src/cpu/operators/CpuMul.cpp",
"src/cpu/operators/CpuPermute.cpp",
@@ -940,6 +941,7 @@ cc_library_static {
"src/runtime/NEON/functions/NELSTMLayer.cpp",
"src/runtime/NEON/functions/NELSTMLayerQuantized.cpp",
"src/runtime/NEON/functions/NELogical.cpp",
+ "src/runtime/NEON/functions/NEMatMul.cpp",
"src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp",
"src/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp",
"src/runtime/NEON/functions/NENormalizationLayer.cpp",
diff --git a/arm_compute/core/ITensorInfo.h b/arm_compute/core/ITensorInfo.h
index 1382649e74..7b0fd1c2b7 100644
--- a/arm_compute/core/ITensorInfo.h
+++ b/arm_compute/core/ITensorInfo.h
@@ -36,6 +36,9 @@
namespace arm_compute
{
+// Note: Any changes to the fields of the class below that have setters should be mirrored
+// (if possible) in the auto_init_if_empty function in AutoConfiguration.h
+
/** Store the tensor's metadata */
class ITensorInfo : public misc::ICloneable<ITensorInfo>
{
diff --git a/arm_compute/runtime/CL/functions/CLMatMul.h b/arm_compute/runtime/CL/functions/CLMatMul.h
index 56dd9c5655..712bac06bf 100644
--- a/arm_compute/runtime/CL/functions/CLMatMul.h
+++ b/arm_compute/runtime/CL/functions/CLMatMul.h
@@ -35,6 +35,13 @@ class ITensorInfo;
class MatMulInfo;
class Status;
+/** Settings for MatMul OpenCL implementation */
+class GpuMatMulSettings
+{
+public:
+ /* Placeholder for operator parity between CPU/GPU */
+};
+
/** Basic function to execute MatMul (Matrix Multiplication) on OpenCL */
class CLMatMul : public IFunction
{
@@ -73,13 +80,14 @@ public:
* @param[in] rhs RHS input tensor (Matrix B). Data type supported: same as @p lhs.
* @param[out] output Output tensor. Data type supported: same as @p lhs.
* @param[in] matmul_info Attributes for MatMul
+ * @param[in] settings Class containing flags for function level settings
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *rhs, ICLTensor *lhs, ICLTensor *output, const MatMulInfo &matmul_info);
+ void configure(const CLCompileContext &compile_context, ICLTensor *rhs, ICLTensor *lhs, ICLTensor *output, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings = GpuMatMulSettings{});
/** Initialise the kernel's inputs and output
*
* Similar to @ref CLMatMul::configure()
*/
- void configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info);
+ void configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings = GpuMatMulSettings{});
/** Static function to check if given info will lead to a valid configuration of @ref CLMatMul.
*
* Similar to @ref CLMatMul::configure()
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 8dbe6c59c3..836cba7699 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -74,6 +74,7 @@
#include "arm_compute/runtime/NEON/functions/NELSTMLayer.h"
#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"
#include "arm_compute/runtime/NEON/functions/NELogical.h"
+#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
#include "arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h"
#include "arm_compute/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
diff --git a/arm_compute/runtime/NEON/functions/NEMatMul.h b/arm_compute/runtime/NEON/functions/NEMatMul.h
new file mode 100644
index 0000000000..0f3e3adacc
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEMatMul.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL
+#define ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL
+
+#include "arm_compute/runtime/IFunction.h"
+#include <memory>
+
+namespace arm_compute
+{
+/** Settings for MatMul Cpu implementation */
+class CpuMatMulSettings
+{
+public:
+ // get fast math flag
+ bool fast_math() const
+ {
+ return _fast_math;
+ }
+ // Set fast math flag
+ CpuMatMulSettings &fast_math(bool fmath)
+ {
+ _fast_math = fmath;
+ return *this;
+ };
+
+private:
+ bool _fast_math{ false };
+};
+
+// Forward declarations
+class ITensor;
+class ITensorInfo;
+class MatMulInfo;
+class Status;
+
+/** Basic function to run the following operators:
+ *
+ * -# @ref cpu::CpuMatMul
+ */
+class NEMatMul : public IFunction
+{
+public:
+ /** Constructor */
+ NEMatMul();
+ /** Destructor */
+ ~NEMatMul();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEMatMul(const NEMatMul &) = delete;
+ /** Default move constructor */
+ NEMatMul(NEMatMul &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEMatMul &operator=(const NEMatMul &) = delete;
+ /** Default move assignment operator */
+ NEMatMul &operator=(NEMatMul &&) = default;
+ /** Initialize
+ *
+ * Valid data layouts:
+ * - Any
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |dst |
+ * |:--------------|:------------------|:--------------|
+ * |F32 |F32 |F32 |
+ * |F16 |F16 |F16 |
+ *
+ * @param[in] lhs Input source tensor.
+ * @param[in] rhs Input source tensor.
+ * @param[out] output Output tensor. Data type supported: same as @p lhs/rhs
+ * @param[in] info Class containing flags to transpose lhs/rhs
+ * @param[in] settings Class containing flags for function level settings i.e. fast math
+ */
+ void configure(ITensor *lhs, ITensor *rhs, ITensor *output, const MatMulInfo &info, const CpuMatMulSettings &settings);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEMatMul
+ *
+ * Parameters are similar to @ref NEMatMul::configure()
+ *
+ * @return Status
+ */
+ static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulInfo &info, const CpuMatMulSettings &settings);
+
+ // Inherited methods overridden
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL */
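As with other runtime functions, a configuration can be checked ahead of time with the static validate() method. A small sketch follows; the shapes are taken from the validation tests added further down and are purely illustrative.

// lhs [K=9, M=6], rhs [N=5, K=9], dst [N=5, M=6]
TensorInfo lhs_info(TensorShape(9U, 6U), 1, DataType::F32);
TensorInfo rhs_info(TensorShape(5U, 9U), 1, DataType::F32);
TensorInfo dst_info(TensorShape(5U, 6U), 1, DataType::F32);

// MatMul only accepts dynamic (non-constant) inputs
lhs_info.set_are_values_constant(false);
rhs_info.set_are_values_constant(false);

Status status = NEMatMul::validate(&lhs_info, &rhs_info, &dst_info, MatMulInfo(), CpuMatMulSettings());
if(!bool(status))
{
    // Handle the unsupported configuration, e.g. report status.error_description()
}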
diff --git a/filelist.json b/filelist.json
index 01659f55cf..cf1c63b883 100644
--- a/filelist.json
+++ b/filelist.json
@@ -1534,9 +1534,11 @@
"src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp",
"src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp",
"src/cpu/operators/CpuGemm.cpp",
+ "src/cpu/operators/CpuMatMul.cpp",
"src/cpu/operators/CpuGemmLowpOutputStage.cpp",
"src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp",
"src/runtime/NEON/functions/NEGEMM.cpp",
+ "src/runtime/NEON/functions/NEMatMul.cpp",
"src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp",
"src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp"
],
@@ -1856,6 +1858,14 @@
}
}
},
+ "MatMul" : {
+ "files": {
+ "common": [
+ "src/cpu/operators/CpuMatMul.cpp",
+ "src/runtime/NEON/functions/NEMatMul.cpp"
+ ]
+ }
+ },
"Mul": {
"files": {
"common": [
diff --git a/src/BUILD.bazel b/src/BUILD.bazel
index 279c52e151..26acc14a68 100644
--- a/src/BUILD.bazel
+++ b/src/BUILD.bazel
@@ -765,10 +765,10 @@ filegroup(
"cpu/kernels/instancenorm/generic/neon/impl.cpp",
"cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp",
"cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp",
- "cpu/kernels/lut/generic/neon/u8.cpp",
"cpu/kernels/l2normlayer/generic/neon/fp16.cpp",
"cpu/kernels/l2normlayer/generic/neon/fp32.cpp",
"cpu/kernels/l2normlayer/generic/neon/impl.cpp",
+ "cpu/kernels/lut/generic/neon/u8.cpp",
"cpu/kernels/maxunpool/generic/neon/fp16.cpp",
"cpu/kernels/maxunpool/generic/neon/fp32.cpp",
"cpu/kernels/maxunpool/generic/neon/impl.cpp",
@@ -837,6 +837,7 @@ filegroup(
"cpu/operators/CpuGemmDirectConv2d.cpp",
"cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp",
"cpu/operators/CpuGemmLowpOutputStage.cpp",
+ "cpu/operators/CpuMatMul.cpp",
"cpu/operators/CpuMaxUnpooling.cpp",
"cpu/operators/CpuMul.cpp",
"cpu/operators/CpuPermute.cpp",
@@ -921,6 +922,7 @@ filegroup(
"runtime/NEON/functions/NELSTMLayer.cpp",
"runtime/NEON/functions/NELSTMLayerQuantized.cpp",
"runtime/NEON/functions/NELogical.cpp",
+ "runtime/NEON/functions/NEMatMul.cpp",
"runtime/NEON/functions/NEMaxUnpoolingLayer.cpp",
"runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp",
"runtime/NEON/functions/NENormalizationLayer.cpp",
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 92c888056e..336d2cd5cc 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -757,10 +757,10 @@ target_sources(
cpu/kernels/instancenorm/generic/neon/impl.cpp
cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp
cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
- cpu/kernels/lut/generic/neon/u8.cpp
cpu/kernels/l2normlayer/generic/neon/fp16.cpp
cpu/kernels/l2normlayer/generic/neon/fp32.cpp
cpu/kernels/l2normlayer/generic/neon/impl.cpp
+ cpu/kernels/lut/generic/neon/u8.cpp
cpu/kernels/maxunpool/generic/neon/fp16.cpp
cpu/kernels/maxunpool/generic/neon/fp32.cpp
cpu/kernels/maxunpool/generic/neon/impl.cpp
@@ -829,6 +829,7 @@ target_sources(
cpu/operators/CpuGemmDirectConv2d.cpp
cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
cpu/operators/CpuGemmLowpOutputStage.cpp
+ cpu/operators/CpuMatMul.cpp
cpu/operators/CpuMaxUnpooling.cpp
cpu/operators/CpuMul.cpp
cpu/operators/CpuPermute.cpp
@@ -913,6 +914,7 @@ target_sources(
runtime/NEON/functions/NELSTMLayer.cpp
runtime/NEON/functions/NELSTMLayerQuantized.cpp
runtime/NEON/functions/NELogical.cpp
+ runtime/NEON/functions/NEMatMul.cpp
runtime/NEON/functions/NEMaxUnpoolingLayer.cpp
runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp
runtime/NEON/functions/NENormalizationLayer.cpp
@@ -960,4 +962,4 @@ target_sources(
runtime/Tensor.cpp
runtime/TensorAllocator.cpp
runtime/Utils.cpp
-)
+)
\ No newline at end of file
diff --git a/src/core/helpers/AutoConfiguration.h b/src/core/helpers/AutoConfiguration.h
index 6880a6cb66..18ffbd6295 100644
--- a/src/core/helpers/AutoConfiguration.h
+++ b/src/core/helpers/AutoConfiguration.h
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020 Arm Limited.
+* Copyright (c) 2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,12 +57,16 @@ inline bool auto_init_if_empty(ITensorInfo &info,
}
/** Auto initialize the tensor info using another tensor info.
-*
-* @param info_sink Tensor info used to check and assign
-* @param info_source Tensor info used to assign
-*
-* @return True if the tensor info has been initialized
-*/
+ *
+ * (COMPMID-6012) This method should remain in sync with the fields of ITensorInfo that have setters.
+ *
+ *
+ * @param info_sink Tensor info used to check and assign
+ * @param info_source Tensor info used to assign
+ *
+ *
+ * @return True if the tensor info has been initialized
+ */
inline bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source)
{
if(info_sink.tensor_shape().total_size() == 0)
@@ -72,6 +76,7 @@ inline bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_s
info_sink.set_tensor_shape(info_source.tensor_shape());
info_sink.set_quantization_info(info_source.quantization_info());
info_sink.set_data_layout(info_source.data_layout());
+ info_sink.set_are_values_constant(info_source.are_values_constant());
return true;
}
diff --git a/src/cpu/operators/CpuMatMul.cpp b/src/cpu/operators/CpuMatMul.cpp
new file mode 100644
index 0000000000..b5359e51af
--- /dev/null
+++ b/src/cpu/operators/CpuMatMul.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/operators/CpuMatMul.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
+#include "src/common/utils/Log.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/cpu/utils/CpuAuxTensorHandler.h"
+
+using namespace arm_compute::experimental;
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuMatMul::CpuMatMul()
+ : _transpose_kernel_lhs(), _transpose_kernel_rhs(), _asm_glue(), _lhs_transposed(), _rhs_transposed(), _original_lhs_shape(), _original_rhs_shape(), _original_dst_shape()
+{
+}
+
+Status CpuMatMul::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulInfo &info, const CpuMatMulSettings &settings)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->are_values_constant(), "LHS Tensor must be dynamic.");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs->are_values_constant(), "RHS Tensor must be dynamic.");
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(lhs);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(lhs);
+
+ const auto adj_lhs = info.adj_lhs();
+ const auto adj_rhs = info.adj_rhs();
+
+ const ITensorInfo *lhs_to_use = lhs;
+ const ITensorInfo *rhs_to_use = rhs;
+ TensorInfo lhs_transposed{};
+ TensorInfo rhs_transposed{};
+
+ auto gemm_info = AsmGemmInfo();
+ gemm_info.activation_info = info.fused_activation();
+ gemm_info.fast_mode = settings.fast_math();
+
+ // Validate and then permute a/b
+ if(adj_lhs)
+ {
+ auto_init_if_empty(lhs_transposed, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_transposed_shape(*lhs)));
+ ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuTransposeKernel::validate(lhs_to_use, &lhs_transposed));
+ // Assign lhs_to_use pointer to use transposed TensorInfo
+ lhs_to_use = &lhs_transposed;
+ }
+ if(adj_rhs)
+ {
+ auto_init_if_empty(rhs_transposed, rhs->clone()->set_tensor_shape(misc::shape_calculator::compute_transposed_shape(*rhs)));
+ ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuTransposeKernel::validate(rhs_to_use, &rhs_transposed));
+ // Assign rhs_to_use pointer to use transposed TensorInfo
+ rhs_to_use = &rhs_transposed;
+ }
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_to_use->dimension(0) != rhs_to_use->dimension(1),
+ "The product AB is defined only if the number of columns in A is equal to the number of rows in B (after transpose)");
+
+ if(lhs_to_use->num_dimensions() > 2)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_to_use->num_dimensions() != rhs_to_use->num_dimensions(), "Broadcasting in Batch dimension is unsupported by this operator.");
+ }
+
+ // Iterate over dimensions to be collapsed in operator - check dimensions are equivalent between tensors
+ for(unsigned int i = 2; i < lhs_to_use->num_dimensions(); i++)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_to_use->dimension(i) != rhs_to_use->dimension(i), "Broadcasting in Batch dimension is unsupported by this operator.");
+ }
+
+ ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuGemmAssemblyDispatch::validate(lhs_to_use, rhs_to_use, nullptr, dst, gemm_info));
+
+ return Status{};
+}
+
+void CpuMatMul::configure(ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulInfo &info, const CpuMatMulSettings &settings)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
+ ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst, info, settings);
+ ARM_COMPUTE_ERROR_THROW_ON(CpuMatMul::validate(lhs, rhs, dst, info, settings));
+
+ _adj_lhs = info.adj_lhs();
+ _adj_rhs = info.adj_rhs();
+ _fast_math = settings.fast_math();
+
+ // 1. Create and reshape tensors
+ // ------------------------------------------------------
+ // a. Clone TensorInfo to prevent changing original tensor values during setup
+ // b. Change shape of lhs/dst to [x, y, 1, collapsed(z)] to match assembly kernel configuration
+ // c. For rhs collapse all dimensions larger than 3 to z dimension
+ TensorInfo lhs_to_use = *lhs->clone();
+ TensorInfo dst_to_use = *dst->clone();
+ TensorInfo rhs_to_use = *rhs->clone();
+
+ // Save starting shape of tensors
+ _original_lhs_shape = lhs_to_use.tensor_shape();
+ _original_dst_shape = dst_to_use.tensor_shape();
+ _original_rhs_shape = rhs_to_use.tensor_shape();
+
+ // Reshape lhs for use with assembly kernels.
+ lhs_to_use.set_tensor_shape(TensorShape(_original_lhs_shape.x(), _original_lhs_shape.y(), 1, _original_lhs_shape.collapsed_from(2).z()));
+ dst_to_use.set_tensor_shape(TensorShape(_original_dst_shape.x(), _original_dst_shape.y(), 1, _original_dst_shape.collapsed_from(2).z()));
+ rhs_to_use.set_tensor_shape(_original_rhs_shape.collapsed_from(2));
+
+ // 2. Configuration for transpose of lhs/rhs
+ // ------------------------------------------------------
+ // Initialise transposed TensorInfo class for aux tensors (intermediary tensors)
+ if(_adj_lhs)
+ {
+ // Setup transpose LHS
+ _transpose_kernel_lhs = std::make_unique<cpu::kernels::CpuTransposeKernel>();
+ _transpose_kernel_lhs->configure(&lhs_to_use, &_lhs_transposed);
+ }
+
+ if(_adj_rhs)
+ {
+ // Setup transpose RHS
+ _transpose_kernel_rhs = std::make_unique<cpu::kernels::CpuTransposeKernel>();
+ _transpose_kernel_rhs->configure(&rhs_to_use, &_rhs_transposed);
+ }
+
+ // 3. Configure assembly kernel using transposed tensors.
+ // -----------------------------------------------------
+ // Use transposed tensors if the corresponding transpose flags are set
+ // Fill AsmGemmInfo class object before configuration
+ _gemm_info.activation_info = info.fused_activation();
+ _gemm_info.fast_mode = settings.fast_math();
+
+ lhs_to_use = (_adj_lhs) ? _lhs_transposed : lhs_to_use;
+ rhs_to_use = (_adj_rhs) ? _rhs_transposed : rhs_to_use;
+
+ // Configure Asm Kernel
+ _asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
+ _asm_glue->configure(&lhs_to_use, &rhs_to_use, nullptr, &dst_to_use, _gemm_info); // c is nullptr as bias not supported in MatMul
+
+ // Specify memory requirements for intermediate tensors
+ auto asm_mem_req = _asm_glue->workspace();
+ // Specify memory required by gemm kernel
+ int idx = 0;
+ for(const auto &aux : asm_mem_req)
+ {
+ _aux_mem[idx] = aux;
+ idx++;
+ }
+ // Memory requirements for transposed tensors
+ _aux_mem[TransposeLHS] = MemoryInfo(offset_int_vec(TransposeLHS), MemoryLifetime::Temporary, lhs->total_size());
+ _aux_mem[TransposeRHS] = MemoryInfo(offset_int_vec(TransposeRHS), MemoryLifetime::Temporary, rhs->total_size());
+}
+
+void CpuMatMul::run(ITensorPack &tensors)
+{
+ // Retrieve tensors from tensor pack
+ auto lhs = tensors.get_tensor(ACL_SRC_0);
+ auto rhs = tensors.get_const_tensor(ACL_SRC_1);
+ auto dst = tensors.get_tensor(ACL_DST);
+
+ // Reshape LHS and DST to ensure compatibility with GEMM asm kernel (Batch dimension is 4th for lhs and dst within asm)
+ // Collapse RHS (necessary to support dimensions larger than 3 in gemm assembly)
+ lhs->info()->set_tensor_shape(TensorShape(_original_lhs_shape.x(), _original_lhs_shape.y(), 1, _original_lhs_shape.collapsed_from(2).z())); // Collapsed 3+ dimensions into z
+ dst->info()->set_tensor_shape(TensorShape(_original_dst_shape.x(), _original_dst_shape.y(), 1, _original_dst_shape.collapsed_from(2).z())); // Collapsed 3+ dimensions into z
+ rhs->info()->set_tensor_shape(_original_rhs_shape.collapsed_from(2));
+
+ // Initialise object to handle stored transposed tensors in auxiliary memory
+ CpuAuxTensorHandler lhs_transposed(offset_int_vec(TransposeLHS), _lhs_transposed, tensors, true);
+ CpuAuxTensorHandler rhs_transposed(offset_int_vec(TransposeRHS), _rhs_transposed, tensors, true);
+
+ // Create tensor pack for asm kernel
+ ITensorPack asm_tensors(tensors);
+
+ // Run transpose lhs if necessary
+ if(_adj_lhs)
+ {
+ ITensorPack lhs_transpose_pack = { { TensorType::ACL_SRC, lhs }, { TensorType::ACL_DST, lhs_transposed.get() } };
+ NEScheduler::get().schedule_op(_transpose_kernel_lhs.get(), Window::DimY, _transpose_kernel_lhs->window(), lhs_transpose_pack);
+ asm_tensors.add_const_tensor(TensorType::ACL_SRC_0, lhs_transposed.get());
+ }
+ // Run transpose rhs if necessary
+ if(_adj_rhs)
+ {
+ ITensorPack rhs_transpose_pack = { { TensorType::ACL_SRC, rhs }, { TensorType::ACL_DST, rhs_transposed.get() } };
+ NEScheduler::get().schedule_op(_transpose_kernel_rhs.get(), Window::DimY, _transpose_kernel_rhs->window(), rhs_transpose_pack);
+ asm_tensors.add_const_tensor(TensorType::ACL_SRC_1, rhs_transposed.get());
+ }
+ // Run asm kernel
+ _asm_glue->run(asm_tensors);
+
+ // Undo reshape of tensors
+ dst->info()->set_tensor_shape(_original_dst_shape);
+ lhs->info()->set_tensor_shape(_original_lhs_shape);
+ rhs->info()->set_tensor_shape(_original_rhs_shape);
+}
+
+experimental::MemoryRequirements CpuMatMul::workspace() const
+{
+ return _aux_mem;
+}
+} // namespace cpu
+} // namespace arm_compute
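To illustrate the reshaping performed in configure()/run() above: lhs and dst are rearranged to [x, y, 1, collapsed(z)] so that all batch dimensions land in the 4th ("Multi") dimension expected by the assembly kernels, while rhs simply has its trailing dimensions collapsed. A hedged sketch with illustrative shapes:

// Assumed 4D matmul: lhs [K=3, M=4, 2, 5], rhs [N=6, K=3, 2, 5], dst [N=6, M=4, 2, 5]
TensorShape lhs_shape(3U, 4U, 2U, 5U);
TensorShape rhs_shape(6U, 3U, 2U, 5U);
TensorShape dst_shape(6U, 4U, 2U, 5U);

// lhs/dst: keep x/y and move every batch dimension into the 4th ("Multi") dimension
TensorShape lhs_for_asm(lhs_shape.x(), lhs_shape.y(), 1, lhs_shape.collapsed_from(2).z()); // [3, 4, 1, 10]
TensorShape dst_for_asm(dst_shape.x(), dst_shape.y(), 1, dst_shape.collapsed_from(2).z()); // [6, 4, 1, 10]

// rhs: collapse every dimension above 2 into z
TensorShape rhs_for_asm = rhs_shape.collapsed_from(2);                                     // [6, 3, 10]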
diff --git a/src/cpu/operators/CpuMatMul.h b/src/cpu/operators/CpuMatMul.h
new file mode 100644
index 0000000000..ae6345141e
--- /dev/null
+++ b/src/cpu/operators/CpuMatMul.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CPU_OPERATORS_CPUMATMUL
+#define SRC_CPU_OPERATORS_CPUMATMUL
+
+#include "arm_compute/core/TensorInfo.h"
+#include "src/core/common/Macros.h"
+#include "src/cpu/ICpuOperator.h"
+#include "src/cpu/kernels/CpuTransposeKernel.h"
+#include "src/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
+
+namespace arm_compute
+{
+// Forward Declarations
+class MatMulInfo;
+class CpuMatMulSettings;
+
+namespace cpu
+{
+/** Function to execute MatMul Operation. This function calls the following functions/kernels:
+ *
+ * If adjoint/adj flag is enabled for either input lhs or rhs (or both) :
+ * -# @ref cpu::kernels::CpuTransposeKernel
+ * Then :
+ * -# @ref cpu::CpuGemmAssemblyDispatch
+ */
+class CpuMatMul : public ICpuOperator
+{
+public:
+ /* Constructor */
+ CpuMatMul();
+ /* Destructor */
+ ~CpuMatMul() = default;
+
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuMatMul);
+ /** Configure operator for a given list of arguments
+ *
+ * Note: Check documentation of @ref NEMatMul for a list of supported datatypes and layouts
+ *
+ *
+ * @param[in] lhs Source tensor info.
+ * @param[in] rhs Source tensor info.
+ * @param[out] dst Destination tensor info. Data types supported: same as @p lhs / @p rhs.
+ * @param[in] info Contains MatMul operation information described in @ref MatMulInfo.
+ * @param[in] settings The settings for matmul operation (i.e. fast math)
+ */
+ void configure(ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulInfo &info, const CpuMatMulSettings &settings);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to CpuMatMul::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulInfo &info, const CpuMatMulSettings &settings);
+
+ // Inherited methods overridden:
+ void run(ITensorPack &tensors) override;
+ experimental::MemoryRequirements workspace() const override;
+
+private:
+ enum InternalTensorIdx
+ {
+ AsmGemmWorkspace = 0, // Pre-allocate workspace tensors for CpuGemmAssemblyDispatch
+ PretransposeRHS, // Pre-allocate workspace tensors for CpuGemmAssemblyDispatch
+ TransposeLHS,
+ TransposeRHS,
+ Count
+ };
+
+ // Define unique pointers to kernels/operators used by matmul
+ std::unique_ptr<kernels::CpuTransposeKernel> _transpose_kernel_lhs{ nullptr };
+ std::unique_ptr<kernels::CpuTransposeKernel> _transpose_kernel_rhs{ nullptr };
+ std::unique_ptr<CpuGemmAssemblyDispatch> _asm_glue{ nullptr };
+
+ // TensorInfo for tensors stored in auxiliary memory
+ TensorInfo _lhs_transposed{};
+ TensorInfo _rhs_transposed{};
+
+ // Original tensor shapes prior to reshaping tensors and collapsing dimensions
+ TensorShape _original_lhs_shape{};
+ TensorShape _original_rhs_shape{};
+ TensorShape _original_dst_shape{};
+
+ // Note : adj_lhs means the same as transposing lhs
+ bool _adj_lhs{ false };
+ bool _adj_rhs{ false };
+ bool _fast_math{ false };
+ AsmGemmInfo _gemm_info{};
+ experimental::MemoryRequirements _aux_mem{ Count };
+};
+} // namespace cpu
+} // namespace arm_compute
+
+#endif /* SRC_CPU_OPERATORS_CPUMATMUL */
diff --git a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.h b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.h
index 0c51c92359..588c45294a 100644
--- a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.h
+++ b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022 Arm Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,6 +82,38 @@ public:
public:
/** If supported create a Compute Library function else fallback to the arm_gemm function.
*
+ * @note Configuring "batches"
+ * The shapes of @p a, @p b and @p d are arranged as follows:
+ * Lowest dimension <-> Highest dimension
+ * a: [K, M, Batch, Multi]
+ * b: [N, K, Multi]
+ * d: [N, M, Batch, Multi]
+ *
+ * The "Batch" refers to where "Batch" number of MxK slices of tensor a multiplies with a single KxN slice of b
+ * The "Multi" refers to where "Multi" number of individual multiplication of a with b
+ *
+ * E.g. the following are some example input shape configurations
+ *
+ * (1) Normal 2D gemm
+ * a: [K=3, M=4]
+ * b: [N=5, K=3]
+ * d: [N=5, M=4]
+ *
+ * (2) Batches of a sharing b (e.g. gemm-based batched convolution where b is shared)
+ * a: [K=3, M=4, Batch=9]
+ * b: [N=5, K=3]
+ * d: [N=5, M=4, Batch=9]
+ *
+ * (3) "Batches" of independent gemm (e.g. batched matmul)
+ * a: [K=3, M=4, Batch=1, Multi=7]
+ * b: [N=5, K=3, Multi=7]
+ * d: [N=5, M=4, Batch=1, Multi=7]
+ *
+ * (4) "Batches" of independent gemm where b is also shared
+ * a: [K=3, M=4, Batch=4, Multi=7]
+ * b: [N=5, K=3, Multi=7]
+ * d: [N=5, M=4, Batch=4, Multi=7]
+ *
* @param[in] a Input tensor (Matrix A)
* @param[in] b Input tensor (Matrix B)
* @param[in] c Input tensor (Matrix C) used to pass the bias for quantized calculations
diff --git a/src/runtime/CL/functions/CLMatMul.cpp b/src/runtime/CL/functions/CLMatMul.cpp
index f42e4ff309..ae5a01f679 100644
--- a/src/runtime/CL/functions/CLMatMul.cpp
+++ b/src/runtime/CL/functions/CLMatMul.cpp
@@ -42,14 +42,16 @@ CLMatMul::CLMatMul()
CLMatMul::~CLMatMul() = default;
-void CLMatMul::configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info)
+void CLMatMul::configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings)
{
+ ARM_COMPUTE_UNUSED(settings);
configure(CLKernelLibrary::get().get_compile_context(), lhs, rhs, output, matmul_info);
}
-void CLMatMul::configure(const CLCompileContext &compile_context, ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info)
+void CLMatMul::configure(const CLCompileContext &compile_context, ICLTensor *lhs, ICLTensor *rhs, ICLTensor *output, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output);
+ ARM_COMPUTE_UNUSED(settings);
_impl->op = std::make_unique<OperatorType>();
_impl->op->configure(compile_context, lhs->info(), rhs->info(), output->info(), matmul_info);
diff --git a/src/runtime/NEON/functions/NEMatMul.cpp b/src/runtime/NEON/functions/NEMatMul.cpp
new file mode 100644
index 0000000000..0c46516f1e
--- /dev/null
+++ b/src/runtime/NEON/functions/NEMatMul.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
+
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/cpu/operators/CpuMatMul.h"
+#include "arm_compute/runtime/Tensor.h"
+
+namespace arm_compute
+{
+struct NEMatMul::Impl
+{
+ const ITensor *lhs{ nullptr };
+ const ITensor *rhs{ nullptr };
+ ITensor *output{ nullptr };
+ std::unique_ptr<cpu::CpuMatMul> op{ nullptr };
+ MemoryGroup memory_group{};
+ WorkspaceData<Tensor> workspace_tensors{};
+ ITensorPack run_pack{};
+};
+
+NEMatMul::NEMatMul()
+ : _impl(std::make_unique<Impl>())
+{
+}
+
+NEMatMul::~NEMatMul() = default;
+
+void NEMatMul::configure(ITensor *lhs, ITensor *rhs, ITensor *output, const MatMulInfo &info, const CpuMatMulSettings &settings)
+{
+ _impl->lhs = lhs;
+ _impl->rhs = rhs;
+ _impl->output = output;
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->lhs, _impl->rhs, _impl->output);
+ _impl->op = std::make_unique<cpu::CpuMatMul>();
+ _impl->op->configure(lhs->info(), rhs->info(), output->info(), info, settings);
+ _impl->run_pack = { { ACL_SRC_0, lhs }, { ACL_SRC_1, rhs }, { ACL_DST, output } };
+ _impl->workspace_tensors = manage_workspace<Tensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack);
+}
+
+Status NEMatMul::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulInfo &info, const CpuMatMulSettings &settings)
+{
+ return cpu::CpuMatMul::validate(lhs, rhs, output, info, settings);
+}
+
+void NEMatMul::run()
+{
+ MemoryGroupResourceScope scope_mg(_impl->memory_group);
+ _impl->op->run(_impl->run_pack);
+}
+} // namespace arm_compute
diff --git a/tests/validation/CL/MatMul.cpp b/tests/validation/CL/MatMul.cpp
index 7ebed43d97..7c1d16008f 100644
--- a/tests/validation/CL/MatMul.cpp
+++ b/tests/validation/CL/MatMul.cpp
@@ -46,8 +46,9 @@ constexpr float abs_tolerance_f16(
0.001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for fp16 data type in case using relative tolerance fails because of small values */
RelativeTolerance<half_float::half> tolerance_f16(half(0.01)); /**< Tolerance value for comparing reference's output against implementation's output for fp16 data type */
} // namespace
+
template <typename T>
-using MatMulFixture = MatMulValidationFixture<CLTensor, CLAccessor, CLMatMul, T>;
+using MatMulFixture = MatMulValidationFixture<CLTensor, CLAccessor, CLMatMul, GpuMatMulSettings, T>;
TEST_SUITE(CL)
TEST_SUITE(MatMul)
diff --git a/tests/validation/NEON/MatMul.cpp b/tests/validation/NEON/MatMul.cpp
new file mode 100644
index 0000000000..3bfbc16e71
--- /dev/null
+++ b/tests/validation/NEON/MatMul.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+
+#include "tests/datasets/LargeMatMulDataset.h"
+#include "tests/datasets/SmallMatMulDataset.h"
+#include "tests/validation/fixtures/MatMulFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(MatMul)
+
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
+const AbsoluteTolerance<half> tolerance_fp16(half(0.1f));
+
+// clang-format off
+// *INDENT-OFF*
+// Validation Tests
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+ framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(9U, 6U), 1, DataType::F32), // Mismatching datatype
+ TensorInfo(TensorShape(9U, 6U), 1, DataType::S32), // Unsupported datatypes
+ TensorInfo(TensorShape(9U, 6U, 2U), 1, DataType::F32), // Broadcasting in batch dimension not supported
+ TensorInfo(TensorShape(9U, 6U), 1, DataType::F32), // Invalid shape for multiplication
+ TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),
+ TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32),
+ TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32), // Tensors are not dynamic
+ }),
+ framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(5U, 9U), 1, DataType::S32),
+ TensorInfo(TensorShape(5U, 9U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 12U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 6U), 1, DataType::S32),
+ TensorInfo(TensorShape(5U, 6U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
+ })),
+ framework::dataset::make( "TensorIsConst", {false, false, false, false, false , false, true} )),
+ framework::dataset::make("Expected", { false, false, false, false, true, true, false })),
+ a_info, b_info, output_info, are_tensors_const, expected)
+{
+ TensorInfo a{a_info};
+ TensorInfo b{b_info};
+ a.set_are_values_constant(are_tensors_const);
+ b.set_are_values_constant(are_tensors_const);
+ Status status = NEMatMul::validate(&a,
+ &b,
+ &output_info,
+ MatMulInfo(),
+ CpuMatMulSettings());
+ ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// *INDENT-ON*
+// clang-format on
+
+// Generic Template
+template <typename T>
+using NEMatMulFixture = MatMulValidationWithActivationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
+
+// Fast math Template
+template <typename T>
+using NEMatMulFastMathFixture = MatMulGenericValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
+
+template <typename T>
+using NEMatMulDynamicTensorsFixture = MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunHighDimensions, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
+ framework::dataset::make("NumberOfRuns", 5)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END() // FP32
+
+#ifdef ARM_COMPUTE_ENABLE_BF16
+/* Note : MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting */
+constexpr AbsoluteTolerance<float> tolerance_bf16(0.001f);
+TEST_SUITE(BF16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFastMathFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
+ framework::dataset::make("RunTimes", { 0 })),
+ framework::dataset::make("Settings", { CpuMatMulSettings().fast_math(true) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_bf16);
+}
+TEST_SUITE_END() // BF16
+#endif /* ARM_COMPUTE_ENABLE_BF16 */
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp16);
+}
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
+ framework::dataset::make("NumberOfRuns", 5)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp16);
+}
+TEST_SUITE_END() // FP16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // MatMul
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h
index 1112dcb2fb..bb4a1cd7be 100644
--- a/tests/validation/fixtures/MatMulFixture.h
+++ b/tests/validation/fixtures/MatMulFixture.h
@@ -26,35 +26,38 @@
#include "arm_compute/core/Types.h"
#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/Permute.h"
-#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/ReshapeLayer.h"
#include <random>
+
namespace arm_compute
{
namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class MatMulValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulGenericValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs,
+ Settings settings)
{
- // For brevity, the input shapes are assumed to be not-transposed for both Lhs and Rhs matrices.
- if(pretranspose_a)
+ // For brevity, the input shapes are assumed to be not-transposed for both a and b matrices.
+ if(transpose_a)
{
permute(shape_a, PermutationVector(1U, 0U));
}
- if(pretranspose_b)
+ if(transpose_b)
{
permute(shape_b, PermutationVector(1U, 0U));
}
- _target = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type);
- _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type);
+
+ _target = compute_target(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings);
+ _reference = compute_reference(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info);
}
protected:
@@ -76,49 +79,84 @@ protected:
break;
}
default:
+ {
library->fill_tensor_uniform(tensor, i);
+ }
}
}
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type)
+
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
+ ActivationLayerInfo act_info, int num_extra_runs, const Settings &settings)
{
// 1. Create Classes and configure function
+ // ----------------------------------------------------
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
- TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
+ // Configure relevant classes and matmul function
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
+
FunctionType matmul;
+
// Configure MatMulInfo class
- MatMulInfo info;
- info.adj_lhs(pretranspose_a);
- info.adj_rhs(pretranspose_b);
- matmul.configure(&a, &b, &dst, info);
+ MatMulInfo mm_info;
+ mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b).fused_activation(act_info);
+
+ // Ensure values are dynamic
+ a.info()->set_are_values_constant(false);
+ b.info()->set_are_values_constant(false);
+
+ // Configure operator
+ matmul.configure(&a, &b, &dst, mm_info, settings);
+
// Assertions
ARM_COMPUTE_ASSERT(a.info()->is_resizable());
ARM_COMPUTE_ASSERT(b.info()->is_resizable());
ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
// Allocate tensors
a.allocator()->allocate();
b.allocator()->allocate();
dst.allocator()->allocate();
+
ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
- // 2. Fill tensors and run once
- // Fill tensors
- fill(AccessorType(a), 0);
- fill(AccessorType(b), 1);
- matmul.run(); // First run
+ // For multiple runs.
+ for(int i = 0; i < num_extra_runs; i++)
+ {
+ // Stress dynamic tensors by running multiple times.
+ // --------------------------------------------------------
+ // Fill tensors with new seed
+ // Run function
+ const int seed_offset = num_extra_runs * 100;
+ fill(AccessorType(a), seed_offset);
+ fill(AccessorType(b), seed_offset + 1);
+
+ matmul.run();
+ }
+
+ // 2. Final Run for reference comparison
+ // --------------------------------------------------------
+ // Re-fill tensors same seed as reference run
+ // Compute MatMul operation
+ fill(AccessorType(a), 2);
+ fill(AccessorType(b), 3);
+
+ matmul.run();
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type)
+
+ SimpleTensor<T> compute_reference(const TensorShape &a_shape, const TensorShape &b_shape, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
+ ActivationLayerInfo act_info)
{
// We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 4D
// This is necessary unless we choose to extend gemm reference for 5D+ tensors
TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimW);
- TensorShape a_shape_collapsed = shape_a.collapsed_from(Window::DimW);
- TensorShape b_shape_collapsed = shape_b.collapsed_from(Window::DimW);
+ TensorShape a_shape_collapsed = a_shape.collapsed_from(Window::DimW);
+ TensorShape b_shape_collapsed = b_shape.collapsed_from(Window::DimW);
// Create reference
SimpleTensor<T> a{ a_shape_collapsed, data_type, 1 };
@@ -126,18 +164,19 @@ protected:
SimpleTensor<T> c{ output_shape_collapsed, data_type, 1 };
// Fill reference
- fill(a, 0);
- fill(b, 1);
+ fill(a, 2);
+ fill(b, 3);
- /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_a is set to true, then A is assumed to be (B x K x M),
- therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K)
- in order to be able to call reference implementation that works with (B x M x K) input.
- Similarly, if pretranspose_b is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */
+ /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if transpose_a is set to true, then A is assumed to be (B x K x M),
+ therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K)
+ in order to be able to call reference implementation that works with (B x M x K) input.
+ Similarly, if transpose_b is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */
// Define transposed shapes
TensorShape a_transposed_shape(a.shape());
a_transposed_shape.set(0, a.shape().y());
a_transposed_shape.set(1, a.shape().x());
+
TensorShape b_transposed_shape(b.shape());
b_transposed_shape.set(0, b.shape().y());
b_transposed_shape.set(1, b.shape().x());
@@ -147,13 +186,12 @@ protected:
SimpleTensor<T> b_transposed{ b_transposed_shape, data_type };
// pretranspose a if necessary
- if(pretranspose_a)
+ if(transpose_a)
{
a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
}
-
// pretranspose b if necessary
- if(pretranspose_b)
+ if(transpose_b)
{
b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
}
@@ -161,7 +199,8 @@ protected:
// Setting beta to 0 will effectively disable C for the
// computation of the reference: alpha * A * B + 0 * C
// Use transposed tensors if boolean enabled else use original tensors
- SimpleTensor<T> result = reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, 1.0f, 0.f);
+ SimpleTensor<T> result = reference::gemm<T>((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c, 1.0f, 0.f);
+ result = reference::activation_layer<T>(result, act_info, QuantizationInfo());
// We reshape the gemm output back if the tensor is high dimensional
if(output_shape_collapsed != output_shape)
@@ -171,10 +210,46 @@ protected:
return result;
}
+
TensorType _target{};
SimpleTensor<T> _reference{};
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0,
+ Settings());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithDynamicTensorsFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
+ }
+};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* TESTS_VALIDATION_FIXTURES_MATMULFIXTURE */
+#endif /* ARM_COMPUTE_TEST_MATMUL_FIXTURE */
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index d40c3eb3d7..0327cd1b35 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -49,6 +49,7 @@
#include "arm_compute/runtime/CL/CLTunerTypes.h"
#include "arm_compute/runtime/CL/CLTypes.h"
#include "arm_compute/runtime/FunctionDescriptors.h"
+#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
#include "arm_compute/runtime/common/LSTMParams.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
@@ -3741,6 +3742,35 @@ inline std::string to_string(const arm_compute::MatMulKernelInfo &matmul_info)
return str.str();
}
+/** Formatted output of the arm_compute::CpuMatMulSettings type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] settings arm_compute::CpuMatMulSettings type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::CpuMatMulSettings &settings)
+{
+ os << "CpuMatMulSettings="
+ << "["
+ << "fast_math=" << settings.fast_math()
+ << "]";
+
+ return os;
+}
+/** Formatted output of the arm_compute::CpuMatMulSettings type.
+ *
+ * @param[in] settings arm_compute::CpuMatMulSettings type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const arm_compute::CpuMatMulSettings &settings)
+{
+ std::stringstream str;
+ str << settings;
+ return str.str();
+}
+
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TYPE_PRINTER_H__ */