aboutsummaryrefslogtreecommitdiff
path: root/examples/gemm_tuner/cl_gemm_reshaped.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'examples/gemm_tuner/cl_gemm_reshaped.cpp')
-rw-r--r--examples/gemm_tuner/cl_gemm_reshaped.cpp168
1 files changed, 112 insertions, 56 deletions
diff --git a/examples/gemm_tuner/cl_gemm_reshaped.cpp b/examples/gemm_tuner/cl_gemm_reshaped.cpp
index e579ed762c..75f3539cb9 100644
--- a/examples/gemm_tuner/cl_gemm_reshaped.cpp
+++ b/examples/gemm_tuner/cl_gemm_reshaped.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,24 +25,26 @@
#error "This example needs to be built with -DARM_COMPUTE_CL"
#endif /* ARM_COMPUTE_CL */
-#include "CommonGemmExampleOptions.h"
-#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTuner.h"
+
+#include "examples/gemm_tuner/CommonGemmExampleOptions.h"
+#include "examples/gemm_tuner/GemmTunerHelpers.h"
+#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h"
+#include "src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"
#include "tests/CL/Helper.h"
-#include "utils/Utils.h"
#include "utils/command_line/CommandLineOptions.h"
#include "utils/command_line/CommandLineParser.h"
+#include "utils/Utils.h"
#include <cstdlib>
using namespace arm_compute;
+using namespace arm_compute::opencl::kernels;
using namespace utils;
using namespace arm_compute::misc::shape_calculator;
using namespace gemm_tuner;
@@ -52,15 +54,16 @@ namespace
/** Structure holding all tunable gemm configs specific to this example/strategy */
struct GemmConfigs
{
- size_t m0{ 4 }; /**< Number of rows processed by the matrix multiplication */
- size_t n0{ 4 }; /**< Number of columns processed by the matrix multiplication */
- size_t k0{ 4 }; /**< Number of partial accumulations performed by the matrix multiplication */
- size_t v0{ 1 }; /**< Number of vertical blocks of size (m0xk0) stored on the same output row */
- size_t h0{ 1 }; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */
- bool interleave_lhs{ true }; /**< Interleave lhs matrix */
- bool transpose_lhs{ true }; /**< Transpose lhs matrix. */
- bool interleave_rhs{ true }; /**< Interleave rhs matrix */
- bool transpose_rhs{ true }; /**< Transpose rhs matrix. */
+ size_t m0{4}; /**< Number of rows processed by the matrix multiplication */
+ size_t n0{4}; /**< Number of columns processed by the matrix multiplication */
+ size_t k0{4}; /**< Number of partial accumulations performed by the matrix multiplication */
+ size_t v0{1}; /**< Number of vertical blocks of size (m0xk0) stored on the same output row */
+ size_t h0{1}; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */
+ bool interleave_lhs{true}; /**< Interleave lhs matrix */
+ bool transpose_lhs{true}; /**< Transpose lhs matrix. */
+ bool interleave_rhs{true}; /**< Interleave rhs matrix */
+ bool transpose_rhs{true}; /**< Transpose rhs matrix. */
+ bool export_to_cl_image_rhs{true}; /**< Export rhs matrix to cl_image. */
};
/** Formatted output of the GemmConfigs type
@@ -84,6 +87,7 @@ struct GemmConfigs
os << "transpose_lhs : " << (configs.transpose_lhs ? true_str : false_str) << std::endl;
os << "interleave_rhs : " << (configs.interleave_rhs ? true_str : false_str) << std::endl;
os << "transpose_rhs : " << (configs.transpose_rhs ? true_str : false_str) << std::endl;
+ os << "export_to_cl_image_rhs : " << (configs.export_to_cl_image_rhs ? true_str : false_str) << std::endl;
return os;
}
@@ -103,7 +107,8 @@ public:
h0(parser.add_positional_option<SimpleOption<size_t>>("h0", 1)),
interleave_lhs(parser.add_positional_option<SimpleOption<size_t>>("interleave_lhs", 1)),
interleave_rhs(parser.add_positional_option<SimpleOption<size_t>>("interleave_rhs", 1)),
- transpose_rhs(parser.add_positional_option<SimpleOption<size_t>>("transpose_rhs", 1))
+ transpose_rhs(parser.add_positional_option<SimpleOption<size_t>>("transpose_rhs", 1)),
+ export_to_cl_image_rhs(parser.add_positional_option<SimpleOption<size_t>>("export_to_cl_image_rhs", 1))
{
m0->set_help("Number of rows processed by the matrix multiplication");
n0->set_help("Number of columns processed by the matrix multiplication");
@@ -115,7 +120,10 @@ public:
// FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and
// transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other
// 2 variants (both transposed and none transposed)
- transpose_rhs->set_help("Transpose rhs matrix but not lhs matrix (1) / Do not transpose rhs matrix but do transpose lhs matrix (0)");
+ transpose_rhs->set_help("Transpose rhs matrix but not lhs matrix (1) / Do not transpose rhs matrix but do "
+ "transpose lhs matrix (0)");
+ export_to_cl_image_rhs->set_help(
+ "Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0)");
}
/** Prevent instances of this class from being copied (As this class contains pointers) */
GemmConfigOptions(const GemmConfigOptions &) = delete;
@@ -128,17 +136,19 @@ public:
/** Default destructor */
~GemmConfigOptions() = default;
- SimpleOption<size_t> *m0; /**< Number of rows processed by the matrix multiplication option */
- SimpleOption<size_t> *n0; /**< Number of columns processed by the matrix multiplication option */
- SimpleOption<size_t> *k0; /**< Number of partial accumulations performed by the matrix multiplication option */
- SimpleOption<size_t> *v0; /**< Number of vertical blocks of size (m0xk0) stored on the same output row option */
- SimpleOption<size_t> *h0; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row option */
+ SimpleOption<size_t> *m0; /**< Number of rows processed by the matrix multiplication option */
+ SimpleOption<size_t> *n0; /**< Number of columns processed by the matrix multiplication option */
+ SimpleOption<size_t> *k0; /**< Number of partial accumulations performed by the matrix multiplication option */
+ SimpleOption<size_t> *v0; /**< Number of vertical blocks of size (m0xk0) stored on the same output row option */
+ SimpleOption<size_t> *h0; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row option */
SimpleOption<size_t> *interleave_lhs; /**< Interleave lhs matrix option (1 enable; 0 disable) */
SimpleOption<size_t> *interleave_rhs; /**< Interleave rhs matrix option (1 enable; 0 disable) */
// FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and
// transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other
// 2 variants (both transposed and none transposed)
- SimpleOption<size_t> *transpose_rhs; /**< Transpose rhs matrix option (1 enable; 0 disable). Also set the lhs matrix transpose option to the opposite. */
+ SimpleOption<size_t> *
+ transpose_rhs; /**< Transpose rhs matrix option (1 enable; 0 disable). Also set the lhs matrix transpose option to the opposite. */
+ SimpleOption<size_t> *export_to_cl_image_rhs; /**< Export rhs matrix to cl_image.*/
};
/** Consumes the gemm configuration options and creates a structure containing all information
@@ -159,17 +169,19 @@ GemmConfigs consume_gemm_configs(const GemmConfigOptions &options)
// FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and
// transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other
// 2 variants (both transposed and none transposed)
- configs.transpose_lhs = options.transpose_rhs->value() == 0;
- configs.interleave_rhs = options.interleave_rhs->value() != 0;
- configs.transpose_rhs = options.transpose_rhs->value() != 0;
+ configs.transpose_lhs = options.transpose_rhs->value() == 0;
+ configs.interleave_rhs = options.interleave_rhs->value() != 0;
+ configs.transpose_rhs = options.transpose_rhs->value() != 0;
+ configs.export_to_cl_image_rhs = options.export_to_cl_image_rhs->value() != 0;
return configs;
}
} // namespace
-// Create function for CLGEMMReshapeLHSMatrixKernel
-using CLGEMMReshapeLHSMatrix = test::CLSynthetizeFunction<CLGEMMReshapeLHSMatrixKernel>;
-// Create function for CLGEMMMatrixMultiplyReshapedKernel
-using CLGEMMMatrixMultiplyReshaped = test::CLSynthetizeFunction<CLGEMMMatrixMultiplyReshapedKernel>;
+
+// Create function for ClGemmReshapeLhsMatrixKernel
+using CLGEMMReshapeLHSMatrix = test::CLSynthetizeOperator<ClGemmReshapeLhsMatrixKernel>;
+// Create function for ClGemmMatrixMultiplyReshapedKernel
+using CLGEMMMatrixMultiplyReshaped = test::CLSynthetizeOperator<ClGemmMatrixMultiplyReshapedKernel>;
class CLGEMMMatrixMultiplyReshapedExample : public Example
{
@@ -177,10 +189,9 @@ public:
bool do_setup(int argc, char **argv) override
{
// Default parameters
- const DataType data_type = DataType::F32;
- const float alpha = 1.0f;
- const float beta = 0.0f;
- const ActivationLayerInfo act_info = ActivationLayerInfo();
+ const float alpha = 1.0f;
+ const float beta = 0.0f;
+ const ActivationLayerInfo act_info = ActivationLayerInfo();
CommonGemmExampleParams params;
GemmConfigs configs;
@@ -191,13 +202,13 @@ public:
// Parse command line options
parser.parse(argc, argv);
- if(param_options.help->is_set() && param_options.help->value())
+ if (param_options.help->is_set() && param_options.help->value())
{
// Print help message
parser.print_help(argv[0]);
return false;
}
- if(!parser.validate())
+ if (!parser.validate())
{
// Invalid arguments. Use default parameters and configs
std::cerr << "Invalid arguments." << std::endl;
@@ -212,16 +223,18 @@ public:
}
// Print gemm parameters and configurations
- std::cerr << "Gemm parameters:" << std::endl;
- std::cerr << params << std::endl;
- std::cerr << "Gemm configurations:" << std::endl;
- std::cerr << configs << std::endl;
+ std::cout << "Gemm parameters:" << std::endl;
+ std::cout << params << std::endl;
+ std::cout << "Gemm configurations:" << std::endl;
+ std::cout << configs << std::endl;
+
+ tuner.set_tuner_mode(params.tuner_mode);
CLScheduler::get().default_init(&tuner);
- lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, data_type));
- rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, data_type));
- bias.allocator()->init(TensorInfo(TensorShape(params.N, 1, params.B), 1, data_type));
+ lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, params.data_type));
+ rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, params.data_type));
+ bias.allocator()->init(TensorInfo(TensorShape(params.N, 1, params.B), 1, params.data_type));
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = configs.m0;
@@ -231,11 +244,12 @@ public:
lhs_info.transpose = configs.transpose_lhs;
GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = configs.n0;
- rhs_info.k0 = configs.k0;
- rhs_info.h0 = configs.h0;
- rhs_info.interleave = configs.interleave_rhs;
- rhs_info.transpose = configs.transpose_rhs;
+ rhs_info.n0 = configs.n0;
+ rhs_info.k0 = configs.k0;
+ rhs_info.h0 = configs.h0;
+ rhs_info.interleave = configs.interleave_rhs;
+ rhs_info.transpose = configs.transpose_rhs;
+ rhs_info.export_to_cl_image = configs.export_to_cl_image_rhs;
GEMMKernelInfo kernel_info;
kernel_info.m = params.M;
@@ -246,17 +260,55 @@ public:
kernel_info.broadcast_bias = true;
kernel_info.activation_info = act_info;
+ if (rhs_info.h0 == 0)
+ {
+ rhs_info.h0 = std::max(kernel_info.n / rhs_info.n0, 1U);
+ }
+
// Initialise lhs_reshaped tensor info
- auto_init_if_empty(*lhs_reshaped.info(), lhs.info()->clone()->set_tensor_shape(compute_lhs_reshaped_shape(*lhs.info(), lhs_info)));
+ lhs_reshaped.allocator()->init(
+ TensorInfo(compute_lhs_reshaped_shape(*lhs.info(), lhs_info), 1, params.data_type));
// Initialise rhs_reshaped tensor info
- auto_init_if_empty(*rhs_reshaped.info(), rhs.info()->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*rhs.info(), rhs_info)));
+ rhs_reshaped.allocator()->init(
+ TensorInfo(compute_rhs_reshaped_shape(*rhs.info(), rhs_info), 1, params.data_type));
+
+ if (rhs_info.export_to_cl_image)
+ {
+ if (!examples::gemm_tuner_helpers::update_padding_for_cl_image(rhs_reshaped.info()))
+ {
+ std::cerr << "cl_image is not supported on the device, disable export_to_cl_image" << std::endl;
+ return false;
+ }
+ }
+
+      // Validate arguments
+ Status status{};
+ status = reshape_lhs.validate(lhs.info(), lhs_reshaped.info(), lhs_info, kernel_info.reinterpret_input_as_3d);
+ if (!status)
+ {
+ // Unsupported arguments
+ std::cerr << "Unsupported arguments." << std::endl;
+ std::cerr << "Check documentation for supported/unsupported combinations" << std::endl;
+ return false;
+ }
+
+ status = gemm.validate(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info,
+ rhs_info, kernel_info);
+ if (!status)
+ {
+ // Unsupported arguments
+ std::cerr << "Unsupported arguments." << std::endl;
+ std::cerr << "Check documentation for supported/unsupported combinations" << std::endl;
+ return false;
+ }
// Configure reshape lhs function
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
// Configure function
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info,
+ rhs_info, kernel_info);
// Allocate tensors
lhs.allocator()->allocate();
@@ -270,9 +322,13 @@ public:
}
void do_run() override
{
- // Execute the function
- reshape_lhs.run();
- gemm.run();
+ // Execute the functions
+ ITensorPack reshape_lsh_pack({{ACL_SRC, &lhs}, {ACL_DST, &lhs_reshaped}});
+ reshape_lhs.run(reshape_lsh_pack);
+
+ ITensorPack gemm_pack(
+ {{ACL_SRC_0, &lhs_reshaped}, {ACL_SRC_1, &rhs_reshaped}, {ACL_SRC_2, &bias}, {ACL_DST, &dst}});
+ gemm.run(gemm_pack);
// Make sure all the OpenCL jobs are done executing:
CLScheduler::get().sync();
@@ -297,7 +353,7 @@ private:
/** Main program for gemm reshaped test
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] v0, [optional] h0, [optional] interleave_lhs, [optional] interleave_rhs, [optional] transpose_rhs )
+ * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] v0, [optional] h0, [optional] interleave_lhs, [optional] interleave_rhs, [optional] transpose_rhs, [optional] export_to_cl_image )
*/
int main(int argc, char **argv)
{