From 282f324f4d6aeff172d447be6dfdf54c3d38b235 Mon Sep 17 00:00:00 2001 From: SiCongLi Date: Tue, 24 Nov 2020 15:24:16 +0000 Subject: COMPMID-3982 Add QASYMM8 support to gemm tuner (reshaped and reshaped_only_rhs) Signed-off-by: SiCongLi Change-Id: Ic6492cfd2701374d837ac53d51b0ddc07e6b1fd1 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4550 Tested-by: Arm Jenkins Reviewed-by: Gian Marco Iodice Comments-Addressed: Arm Jenkins --- examples/gemm_tuner/CommonGemmExampleOptions.cpp | 1 + examples/gemm_tuner/benchmark_gemm_examples.sh | 32 +- examples/gemm_tuner/cl_gemm_reshaped.cpp | 2 +- examples/gemm_tuner/cl_gemm_reshaped_rhs_only.cpp | 2 +- examples/gemm_tuner/cl_gemmlowp_reshaped.cpp | 316 ++++++++++++++++++ ...aped_rhs_only_fused_output_stage_fixedpoint.cpp | 364 +++++++++++++++++++++ 6 files changed, 703 insertions(+), 14 deletions(-) create mode 100644 examples/gemm_tuner/cl_gemmlowp_reshaped.cpp create mode 100644 examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp (limited to 'examples') diff --git a/examples/gemm_tuner/CommonGemmExampleOptions.cpp b/examples/gemm_tuner/CommonGemmExampleOptions.cpp index 2e15e62d4e..f50fc63562 100644 --- a/examples/gemm_tuner/CommonGemmExampleOptions.cpp +++ b/examples/gemm_tuner/CommonGemmExampleOptions.cpp @@ -50,6 +50,7 @@ CommonGemmExampleOptions::CommonGemmExampleOptions(CommandLineParser &parser) { DataType::F16, DataType::F32, + DataType::QASYMM8, }; data_type = parser.add_option>("type", supported_data_types, DataType::F32); diff --git a/examples/gemm_tuner/benchmark_gemm_examples.sh b/examples/gemm_tuner/benchmark_gemm_examples.sh index b5628f7be8..8789db91c7 100644 --- a/examples/gemm_tuner/benchmark_gemm_examples.sh +++ b/examples/gemm_tuner/benchmark_gemm_examples.sh @@ -1,4 +1,4 @@ -# Copyright (c) 2019 Arm Limited. +# Copyright (c) 2019-2020 Arm Limited. 
# # SPDX-License-Identifier: MIT # @@ -35,6 +35,8 @@ ALL_STRATEGY_OPTIONS=("native" "reshaped_rhs_only" "reshaped") EXAMPLE_BIN_NATIVE="benchmark_cl_gemm_native" EXAMPLE_BIN_RESHAPED_RHS_ONLY="benchmark_cl_gemm_reshaped_rhs_only" EXAMPLE_BIN_RESHAPED="benchmark_cl_gemm_reshaped" +EXAMPLE_BIN_RESHAPED_RHS_ONLY_LOWP="benchmark_cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint" +EXAMPLE_BIN_RESHAPED_LOWP="benchmark_cl_gemmlowp_reshaped" # Default data type DEFAULT_DATA_TYPE="F32" @@ -131,7 +133,7 @@ Gemm config file (Strategy reshaped_rhs_only): h0 - Number of horizontal blocks of size (k0xn0) stored on the same output row interleave_rhs - Interleave rhs matrix (1) / Do not interleave rhs matrix (0) transpose_rhs - Transpose rhs matrix (1) / Do not transpose rhs matrix (0) - export_to_cl_image_rhs - Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0). Can only be true + export_to_cl_image_rhs - (Not supported for quantized types) Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0). Can only be true with certain combinations of the GEMMParams and other configs. Please refer to CLGEMMReshapeRHSMatrixKernel for more details @@ -172,7 +174,7 @@ Gemm config file (Strategy reshaped): interleave_lhs - Interleave lhs matrix (1) / Do not interleave lhs matrix (0) interleave_rhs - Interleave rhs matrix (1) / Do not interleave rhs matrix (0) transpose_rhs - Transpose rhs matrix but not lhs matrix (1) / Do not transpose rhs matrix but do transpose lhs matrix (0) - export_to_cl_image_rhs - Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0). Can only be true + export_to_cl_image_rhs - (Not supported for quantized types) Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0). Can only be true with certain combinations of the GEMMParams and other configs. 
Please refer to CLGEMMReshapeRHSMatrixKernel for more details @@ -238,8 +240,8 @@ Options: Supported options: Strategy : Data Types Native : F32 - Reshaped : F16, F32 - Reshaped RHS Only : F16, F32 + Reshaped : F16, F32, QASYMM8 + Reshaped RHS Only : F16, F32, QASYMM8 -o Path to output directory that holds output json files @@ -436,7 +438,7 @@ while getopts "hs:e:g:c:d:o:" opt; do e) EXAMPLE_BIN_DIR="${OPTARG}";; g) GEMM_SHAPES_FILE="${OPTARG}";; c) GEMM_CONFIGS_FILE="${OPTARG}";; - d) DATA_TYPE="${OPTARG}";; + d) DATA_TYPE=$(to_lower "${OPTARG}");; o) OUT_DIR="${OPTARG}";; esac done @@ -482,10 +484,16 @@ date +%s > ${OUT_DIR}/start_time_unix_seconds # Run selected strategy with all configurations # Restart the built-in timer -SECONDS=0 -[ "${STRATEGY_OPTION}" == "native" ] && run $EXAMPLE_BIN_NATIVE -[ "${STRATEGY_OPTION}" == "reshaped_rhs_only" ] && run $EXAMPLE_BIN_RESHAPED_RHS_ONLY -[ "${STRATEGY_OPTION}" == "reshaped" ] && run $EXAMPLE_BIN_RESHAPED - -date +%s > ${OUT_DIR}/end_time_unix_seconds +if [ "$DATA_TYPE" == "qasymm8" ]; then + SECONDS=0 + [ "${STRATEGY_OPTION}" == "reshaped_rhs_only" ] && run $EXAMPLE_BIN_RESHAPED_RHS_ONLY_LOWP + [ "${STRATEGY_OPTION}" == "reshaped" ] && run $EXAMPLE_BIN_RESHAPED_LOWP + date +%s > ${OUT_DIR}/end_time_unix_seconds +else + SECONDS=0 + [ "${STRATEGY_OPTION}" == "native" ] && run $EXAMPLE_BIN_NATIVE + [ "${STRATEGY_OPTION}" == "reshaped_rhs_only" ] && run $EXAMPLE_BIN_RESHAPED_RHS_ONLY + [ "${STRATEGY_OPTION}" == "reshaped" ] && run $EXAMPLE_BIN_RESHAPED + date +%s > ${OUT_DIR}/end_time_unix_seconds +fi # Main: Main script }}} diff --git a/examples/gemm_tuner/cl_gemm_reshaped.cpp b/examples/gemm_tuner/cl_gemm_reshaped.cpp index a4d6203d5c..e518b86b4e 100644 --- a/examples/gemm_tuner/cl_gemm_reshaped.cpp +++ b/examples/gemm_tuner/cl_gemm_reshaped.cpp @@ -328,7 +328,7 @@ private: /** Main program for gemm reshaped test * * @param[in] argc Number of arguments - * @param[in] argv Arguments ( [optional] M, [optional] N, 
[optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] v0, [optional] h0, [optional] interleave_lhs, [optional] interleave_rhs, [optional] transpose_rhs ) + * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] v0, [optional] h0, [optional] interleave_lhs, [optional] interleave_rhs, [optional] transpose_rhs, [optional] export_to_cl_image ) */ int main(int argc, char **argv) { diff --git a/examples/gemm_tuner/cl_gemm_reshaped_rhs_only.cpp b/examples/gemm_tuner/cl_gemm_reshaped_rhs_only.cpp index cf65d0dd33..08bd5d2bd3 100644 --- a/examples/gemm_tuner/cl_gemm_reshaped_rhs_only.cpp +++ b/examples/gemm_tuner/cl_gemm_reshaped_rhs_only.cpp @@ -279,7 +279,7 @@ private: /** Main program for gemm reshaped rhs only test * * @param[in] argc Number of arguments - * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] h0, [optional] interleave_rhs, [optional] transpose_rhs ) + * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] h0, [optional] interleave_rhs, [optional] transpose_rhs, [optional] export_to_cl_image) */ int main(int argc, char **argv) { diff --git a/examples/gemm_tuner/cl_gemmlowp_reshaped.cpp b/examples/gemm_tuner/cl_gemmlowp_reshaped.cpp new file mode 100644 index 0000000000..c45c38411f --- /dev/null +++ b/examples/gemm_tuner/cl_gemmlowp_reshaped.cpp @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2020 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */ +#error "This example needs to be built with -DARM_COMPUTE_CL" +#endif /* ARM_COMPUTE_CL */ + +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/KernelDescriptors.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/runtime/CL/CLScheduler.h" +#include "arm_compute/runtime/CL/CLTuner.h" +#include "examples/gemm_tuner/CommonGemmExampleOptions.h" +#include "examples/gemm_tuner/GemmTunerHelpers.h" +#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h" +#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h" +#include "tests/CL/Helper.h" +#include "utils/Utils.h" +#include "utils/command_line/CommandLineOptions.h" +#include "utils/command_line/CommandLineParser.h" + +#include + +using namespace arm_compute; +using namespace utils; +using namespace arm_compute::misc::shape_calculator; +using namespace gemm_tuner; + +namespace +{ +/** Structure holding all tunable gemm configs specific to this example/strategy */ +struct GemmConfigs +{ + size_t m0{ 4 }; /**< Number of rows processed by the matrix multiplication */ + size_t n0{ 4 }; /**< Number of columns processed by the matrix multiplication */ + size_t k0{ 4 }; /**< Number of partial accumulations performed by the matrix multiplication */ + size_t v0{ 1 }; /**< Number of vertical blocks of size (m0xk0) stored on the same output row */ + size_t h0{ 1 }; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */ + bool interleave_lhs{ true }; /**< Interleave lhs matrix */ + bool transpose_lhs{ true }; /**< Transpose lhs matrix. */ + bool interleave_rhs{ true }; /**< Interleave rhs matrix */ + bool transpose_rhs{ true }; /**< Transpose rhs matrix. */ +}; + +/** Formatted output of the GemmConfigs type + * + * @param[out] os Output stream. 
+ * @param[in] configs Tunable configurations to output + * + * @return Modified output stream. + */ +::std::ostream &operator<<(::std::ostream &os, const GemmConfigs &configs) +{ + std::string false_str = std::string("false"); + std::string true_str = std::string("true"); + + os << "m0 : " << configs.m0 << std::endl; + os << "n0 : " << configs.n0 << std::endl; + os << "k0 : " << configs.k0 << std::endl; + os << "v0 : " << configs.v0 << std::endl; + os << "h0 : " << configs.h0 << std::endl; + os << "interleave_lhs : " << (configs.interleave_lhs ? true_str : false_str) << std::endl; + os << "transpose_lhs : " << (configs.transpose_lhs ? true_str : false_str) << std::endl; + os << "interleave_rhs : " << (configs.interleave_rhs ? true_str : false_str) << std::endl; + os << "transpose_rhs : " << (configs.transpose_rhs ? true_str : false_str) << std::endl; + return os; +} + +/** Command line options for gemm configs */ +class GemmConfigOptions +{ +public: + /** Constructor + * + * @param[in,out] parser A parser on which "parse()" hasn't been called yet. 
+ */ + GemmConfigOptions(CommandLineParser &parser) + : m0(parser.add_positional_option>("m0", 4)), + n0(parser.add_positional_option>("n0", 4)), + k0(parser.add_positional_option>("k0", 4)), + v0(parser.add_positional_option>("v0", 1)), + h0(parser.add_positional_option>("h0", 1)), + interleave_lhs(parser.add_positional_option>("interleave_lhs", 1)), + interleave_rhs(parser.add_positional_option>("interleave_rhs", 1)), + transpose_rhs(parser.add_positional_option>("transpose_rhs", 1)) + { + m0->set_help("Number of rows processed by the matrix multiplication"); + n0->set_help("Number of columns processed by the matrix multiplication"); + k0->set_help("Number of partial accumulations performed by the matrix multiplication"); + v0->set_help("Number of vertical blocks of size (m0xk0) stored on the same output row"); + h0->set_help("Number of horizontal blocks of size (k0xn0) stored on the same output row"); + interleave_lhs->set_help("Interleave lhs matrix (1) / Do not interleave lhs matrix (0)"); + interleave_rhs->set_help("Interleave rhs matrix (1) / Do not interleave rhs matrix (0)"); + // FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and + // transpose_rhs are the opposites of each other. 
In the future we may extend the kernels to include the other + // 2 variants (both transposed and none transposed) + transpose_rhs->set_help("Transpose rhs matrix but not lhs matrix (1) / Do not transpose rhs matrix but do transpose lhs matrix (0)"); + } + /** Prevent instances of this class from being copied (As this class contains pointers) */ + GemmConfigOptions(const GemmConfigOptions &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + GemmConfigOptions &operator=(const GemmConfigOptions &) = delete; + /** Allow instances of this class to be moved */ + GemmConfigOptions(GemmConfigOptions &&) = default; + /** Allow instances of this class to be moved */ + GemmConfigOptions &operator=(GemmConfigOptions &&) = default; + /** Default destructor */ + ~GemmConfigOptions() = default; + + SimpleOption *m0; /**< Number of rows processed by the matrix multiplication option */ + SimpleOption *n0; /**< Number of columns processed by the matrix multiplication option */ + SimpleOption *k0; /**< Number of partial accumulations performed by the matrix multiplication option */ + SimpleOption *v0; /**< Number of vertical blocks of size (m0xk0) stored on the same output row option */ + SimpleOption *h0; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row option */ + SimpleOption *interleave_lhs; /**< Interleave lhs matrix option (1 enable; 0 disable) */ + SimpleOption *interleave_rhs; /**< Interleave rhs matrix option (1 enable; 0 disable) */ + // FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and + // transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other + // 2 variants (both transposed and none transposed) + SimpleOption *transpose_rhs; /**< Transpose rhs matrix option (1 enable; 0 disable). Also set the lhs matrix transpose option to the opposite. 
*/ +}; + +/** Consumes the gemm configuration options and creates a structure containing all information + * + * @param[in] options Options to consume + * + * @return Structure containing the gemm configurations + */ +GemmConfigs consume_gemm_configs(const GemmConfigOptions &options) +{ + GemmConfigs configs; + configs.m0 = options.m0->value(); + configs.n0 = options.n0->value(); + configs.k0 = options.k0->value(); + configs.v0 = options.v0->value(); + configs.h0 = options.h0->value(); + configs.interleave_lhs = options.interleave_lhs->value() != 0; + // FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and + // transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other + // 2 variants (both transposed and none transposed) + configs.transpose_lhs = options.transpose_rhs->value() == 0; + configs.interleave_rhs = options.interleave_rhs->value() != 0; + configs.transpose_rhs = options.transpose_rhs->value() != 0; + return configs; +} + +} // namespace + +using CLGEMMReshapeLHSMatrix = test::CLSynthetizeFunction; +using CLGEMMLowpMatrixMultiplyReshaped = test::CLSynthetizeFunction; + +class CLGEMMLowpMatrixMultiplyReshapedExample : public Example +{ +public: + bool do_setup(int argc, char **argv) override + { + // Default parameters + CommonGemmExampleParams params; + GemmConfigs configs; + + // Parse command line options + CommandLineParser parser; + CommonGemmExampleOptions param_options(parser); + GemmConfigOptions config_options(parser); + + parser.parse(argc, argv); + if(param_options.help->is_set() && param_options.help->value()) + { + parser.print_help(argv[0]); + return false; + } + if(!parser.validate()) + { + // Invalid arguments. Use default parameters and configs + std::cerr << "Invalid arguments." 
<< std::endl; + parser.print_help(argv[0]); + std::cerr << "Falling back to default parameters and configs" << std::endl; + } + else + { + params = consume_common_gemm_example_parameters(param_options); + configs = consume_gemm_configs(config_options); + } + + std::cout << "Gemm parameters:" << std::endl; + std::cout << params << std::endl; + std::cout << "Gemm configurations:" << std::endl; + std::cout << configs << std::endl; + + CLScheduler::get().default_init(&tuner); + + lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, params.data_type)); + rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, params.data_type)); + + // Set arbitrary quantization information + lhs.info()->set_quantization_info({ 0.012, 3 }); + rhs.info()->set_quantization_info({ 0.012, 3 }); + dst.info()->set_quantization_info({ 0.012, 3 }); + + GEMMLHSMatrixInfo lhs_info; + lhs_info.m0 = configs.m0; + lhs_info.k0 = configs.k0; + lhs_info.v0 = configs.v0; + lhs_info.interleave = configs.interleave_lhs; + lhs_info.transpose = configs.transpose_lhs; + + GEMMRHSMatrixInfo rhs_info; + rhs_info.n0 = configs.n0; + rhs_info.k0 = configs.k0; + rhs_info.h0 = configs.h0; + rhs_info.interleave = configs.interleave_rhs; + rhs_info.transpose = configs.transpose_rhs; + rhs_info.export_to_cl_image = false; // CL image not supported for quantized cases yet + + lhs_reshaped.allocator()->init(TensorInfo(compute_lhs_reshaped_shape(*lhs.info(), lhs_info), 1, params.data_type)); + + rhs_reshaped.allocator()->init(TensorInfo(compute_rhs_reshaped_shape(*rhs.info(), rhs_info), 1, params.data_type)); + + if(rhs_info.export_to_cl_image) + { + examples::gemm_tuner_helpers::update_padding_for_cl_image(rhs_reshaped.info()); + } + + GEMMReshapeInfo gemm_info + { + static_cast(params.M), + static_cast(params.N), + static_cast(params.K), + static_cast(configs.h0), + static_cast(configs.v0), + 0, + false, + true + }; + + // Validate arguments + 
if(!reshape_lhs.validate(lhs.info(), lhs_reshaped.info(), lhs_info, gemm_info.reinterpret_input_as_3d())) + { + std::cerr << "Invalid arguments for CLGEMMReshapeLHSMatrixKernel." << std::endl; + return false; + } + + if(!gemm.validate(lhs_reshaped.info(), rhs_reshaped.info(), dst.info(), lhs_info, rhs_info, gemm_info)) + { + std::cerr << "Invalid arguments for CLGEMMLowpMatrixMultiplyReshapedKernel." << std::endl; + return false; + } + + // Configure functions + reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); + + gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, gemm_info); + + // Allocate tensors + lhs.allocator()->allocate(); + rhs.allocator()->allocate(); + lhs_reshaped.allocator()->allocate(); + rhs_reshaped.allocator()->allocate(); + dst.allocator()->allocate(); + + return true; + } + void do_run() override + { + reshape_lhs.run(); + gemm.run(); + + // Make sure all the OpenCL jobs are done executing: + CLScheduler::get().sync(); + } + + void do_teardown() override + { + } + +private: + CLTensor lhs{}; + CLTensor rhs{}; + CLTensor lhs_reshaped{}; + CLTensor rhs_reshaped{}; + CLTensor dst{}; + CLTuner tuner{}; + CLGEMMReshapeLHSMatrix reshape_lhs{}; + CLGEMMLowpMatrixMultiplyReshaped gemm{}; +}; + +/** Main test program for gemmlowp reshaped + * + * @param[in] argc Number of arguments + * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] v0, [optional] h0, [optional] interleave_lhs, [optional] interleave_rhs, [optional] transpose_rhs ) + */ +int main(int argc, char **argv) +{ + return run_example(argc, argv); +} diff --git a/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp b/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp new file mode 100644 index 0000000000..c6818e48b0 --- /dev/null +++ b/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp @@ 
-0,0 +1,364 @@ +/* + * Copyright (c) 2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */ +#error "This example needs to be built with -DARM_COMPUTE_CL" +#endif /* ARM_COMPUTE_CL */ + +#include "CommonGemmExampleOptions.h" +#include "GemmTunerHelpers.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/KernelDescriptors.h" +#include "arm_compute/core/KernelDescriptors.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" +#include "arm_compute/runtime/CL/CLScheduler.h" +#include "arm_compute/runtime/CL/CLTuner.h" +#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h" +#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h" +#include "tests/CL/Helper.h" +#include "utils/Utils.h" +#include "utils/command_line/CommandLineOptions.h" +#include "utils/command_line/CommandLineParser.h" + +#include +#include + +using namespace arm_compute; +using namespace utils; +using namespace arm_compute::misc::shape_calculator; +using namespace gemm_tuner; + +namespace +{ +/** Structure holding all tunable gemm configs specific to this example/strategy */ +struct GemmConfigs +{ + size_t m0{ 4 }; /**< Number of rows processed by the matrix multiplication */ + size_t n0{ 4 }; /**< Number of columns processed by the matrix multiplication */ + size_t k0{ 4 }; /**< Number of partial accumulations performed by the matrix multiplication */ + size_t h0{ 1 }; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */ + bool interleave_rhs{ true }; /**< Interleave rhs matrix */ + bool transpose_rhs{ true }; /**< Transpose rhs matrix */ +}; + +/** Formatted output of the GemmConfigs type + * + * @param[out] os Output stream. + * @param[in] configs Tunable configurations to output + * + * @return Modified output stream. 
+ */ +::std::ostream &operator<<(::std::ostream &os, const GemmConfigs &configs) +{ + std::string false_str = std::string("false"); + std::string true_str = std::string("true"); + + os << "m0 : " << configs.m0 << std::endl; + os << "n0 : " << configs.n0 << std::endl; + os << "k0 : " << configs.k0 << std::endl; + os << "h0 : " << configs.h0 << std::endl; + os << "interleave_rhs : " << (configs.interleave_rhs ? true_str : false_str) << std::endl; + os << "transpose_rhs : " << (configs.transpose_rhs ? true_str : false_str) << std::endl; + return os; +} + +/** Command line options for gemm configs */ +class GemmConfigOptions +{ +public: + /** Constructor + * + * @param[in,out] parser A parser on which "parse()" hasn't been called yet. + */ + GemmConfigOptions(CommandLineParser &parser) + : m0(parser.add_positional_option>("m0", 4)), + n0(parser.add_positional_option>("n0", 4)), + k0(parser.add_positional_option>("k0", 4)), + h0(parser.add_positional_option>("h0", 1)), + interleave_rhs(parser.add_positional_option>("interleave_rhs", 1)), + transpose_rhs(parser.add_positional_option>("transpose_rhs", 1)) + { + m0->set_help("Number of rows processed by the matrix multiplication"); + n0->set_help("Number of columns processed by the matrix multiplication"); + k0->set_help("Number of partial accumulations performed by the matrix multiplication"); + h0->set_help("Number of horizontal blocks of size (k0xn0) stored on the same output row"); + interleave_rhs->set_help("Interleave rhs matrix (1) / Do not interleave rhs matrix (0)"); + transpose_rhs->set_help("Transpose rhs matrix (1) / Do not transpose rhs matrix (0)"); + } + /** Prevent instances of this class from being copied (As this class contains pointers) */ + GemmConfigOptions(const GemmConfigOptions &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + GemmConfigOptions &operator=(const GemmConfigOptions &) = delete; + /** Allow instances of this class to be moved */ 
+ GemmConfigOptions(GemmConfigOptions &&) = default; + /** Allow instances of this class to be moved */ + GemmConfigOptions &operator=(GemmConfigOptions &&) = default; + /** Default destructor */ + ~GemmConfigOptions() = default; + + SimpleOption *m0; /**< Number of rows processed by the matrix multiplication option */ + SimpleOption *n0; /**< Number of columns processed by the matrix multiplication option */ + SimpleOption *k0; /**< Number of partial accumulations performed by the matrix multiplication option */ + SimpleOption *h0; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row option */ + SimpleOption *interleave_rhs; /**< Interleave rhs matrix option (1 enable; 0 disable) */ + SimpleOption *transpose_rhs; /**< Transpose rhs matrix option (1 enable; 0 disable) */ +}; + +/** Consumes the gemm configuration options and creates a structure containing all information + * + * @param[in] options Options to consume + * + * @return Structure containing the gemm configurations + */ +GemmConfigs consume_gemm_configs(const GemmConfigOptions &options) +{ + GemmConfigs configs; + configs.m0 = options.m0->value(); + configs.n0 = options.n0->value(); + configs.k0 = options.k0->value(); + configs.h0 = options.h0->value(); + configs.interleave_rhs = options.interleave_rhs->value() != 0; + configs.transpose_rhs = options.transpose_rhs->value() != 0; + return configs; +} + +} // namespace + +using CLGEMMLowpMatrixMultiplyReshapedOnlyRHS = test::CLSynthetizeFunction; +using CLGEMMLowpMatrixAReduction = test::CLSynthetizeFunction; + +class CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFusedOutputStageFixedpointExample : public Example +{ +public: + bool do_setup(int argc, char **argv) override + { + // Default parameters + CommonGemmExampleParams params; + GemmConfigs configs; + + // Parse command line options + CommandLineParser parser; + CommonGemmExampleOptions param_options(parser); + GemmConfigOptions config_options(parser); + + parser.parse(argc, argv); 
+ if(param_options.help->is_set() && param_options.help->value()) + { + parser.print_help(argv[0]); + return false; + } + if(!parser.validate()) + { + // Invalid arguments. Use default parameters and configs + std::cerr << "Invalid arguments." << std::endl; + parser.print_help(argv[0]); + std::cerr << "Falling back to default parameters and configs" << std::endl; + } + else + { + params = consume_common_gemm_example_parameters(param_options); + configs = consume_gemm_configs(config_options); + } + + std::cout << "Gemm parameters:" << std::endl; + std::cout << params << std::endl; + std::cout << "Gemm configurations:" << std::endl; + std::cout << configs << std::endl; + + CLScheduler::get().default_init(&tuner); + + lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, params.data_type)); + rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, params.data_type)); + bias.allocator()->init(TensorInfo(TensorShape(params.N, 1, params.B), 1, DataType::S32)); + dst.allocator()->init(TensorInfo(TensorShape(params.N, params.M, params.B), 1, params.data_type)); + + // Set arbitrary quantization information (non-zero offset to ensure offset contribution stage is included) + // Could be extended in the future to include a user-controlled option for offset == 0 + lhs.info()->set_quantization_info({ 0.012, 3 }); + rhs.info()->set_quantization_info({ 0.012, 3 }); + bias.info()->set_quantization_info({ 0.012, 3 }); + dst.info()->set_quantization_info({ 0.012, 3 }); + + GEMMLHSMatrixInfo lhs_info; + lhs_info.m0 = configs.m0; + lhs_info.k0 = configs.k0; + + GEMMRHSMatrixInfo rhs_info; + rhs_info.n0 = configs.n0; + rhs_info.k0 = configs.k0; + rhs_info.h0 = configs.h0; + rhs_info.interleave = configs.interleave_rhs; + rhs_info.transpose = configs.transpose_rhs; + rhs_info.export_to_cl_image = false; // CL image not supported for quantized cases yet + + rhs_reshaped.allocator()->init(TensorInfo(compute_rhs_reshaped_shape(*rhs.info(), 
rhs_info), 1, params.data_type)); + if(rhs_info.export_to_cl_image) + { + examples::gemm_tuner_helpers::update_padding_for_cl_image(rhs_reshaped.info()); + } + + // Configure output stage for quantized case + GEMMLowpOutputStageInfo gemmlowp_output_stage; + gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT; + gemmlowp_output_stage.output_data_type = dst.info()->data_type(); + gemmlowp_output_stage.gemmlowp_offset = 0; + { + const int idx_kernels = get_data_layout_dimension_index(lhs.info()->data_layout(), DataLayoutDimension::BATCHES); + gemmlowp_output_stage.is_quantized_per_channel = false; + // Num_filters is 1 unless quantized type is of per_channel type. Could be extended in the future to support per-channel quantization. + const unsigned int num_filters = 1; + + dst_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32)); + dst_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32)); + + gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters); + gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters); + quantization::compute_quantized_multipliers_and_shifts(lhs.info(), + rhs.info(), + dst.info(), + idx_kernels, + gemmlowp_output_stage.gemmlowp_multipliers.data(), + gemmlowp_output_stage.gemmlowp_shifts.data()); + gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0]; + gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0]; + + // No fused activation + PixelValue min_val{}; + PixelValue max_val{}; + std::tie(min_val, max_val) = get_min_max(dst.info()->data_type()); + + auto min_activation = min_val.get(); + auto max_activation = max_val.get(); + + // Set the GEMMLowp output stage info + gemmlowp_output_stage.gemmlowp_offset = dst.info()->quantization_info().uniform().offset; + gemmlowp_output_stage.gemmlowp_min_bound = min_activation; + gemmlowp_output_stage.gemmlowp_max_bound = max_activation; + } + + 
GEMMKernelInfo gemm_info; + gemm_info.m = params.M; + gemm_info.n = params.N; + gemm_info.k = params.K; + gemm_info.depth_output_gemm3d = 0; + gemm_info.reinterpret_input_as_3d = false; + gemm_info.broadcast_bias = true; + gemm_info.fp_mixed_precision = false; + gemm_info.has_pad_y = false; + gemm_info.mult_transpose1xW_width = configs.h0; + gemm_info.lhs_info = lhs_info; + gemm_info.rhs_info = rhs_info; + gemm_info.a_offset = lhs.info()->quantization_info().uniform().offset; + gemm_info.b_offset = rhs.info()->quantization_info().uniform().offset; + gemm_info.output_stage = gemmlowp_output_stage; + + // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0 + if(gemm_info.b_offset != 0) + { + const TensorInfo info_vector_sum_row(compute_reductionB_shape(*lhs.info()), 1, DataType::S32); + vector_sum_row.allocator()->init(info_vector_sum_row); + + mtx_a_reduction = support::cpp14::make_unique(); + + if(!mtx_a_reduction->validate(lhs.info(), vector_sum_row.info(), GEMMLowpReductionKernelInfo{})) + { + std::cerr << "Invalid arguments for CLGEMMLowpMatrixAReductionKernel." << std::endl; + return false; + } + + mtx_a_reduction->configure(&lhs, &vector_sum_row, GEMMLowpReductionKernelInfo{}); + } + // Initialize matrix B reduction kernel only if _a_offset is not equal to 0 + if(gemm_info.a_offset != 0) + { + const TensorInfo info_vector_sum_col(compute_reductionA_shape(*rhs.info()), 1, DataType::S32); + vector_sum_col.allocator()->init(info_vector_sum_col); + // There's no need for a Matrix B reduction kernel as this is assumed to be run only once in the prepare stage + } + + // Validate arguments + if(!gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, gemm_info.a_offset == 0 ? nullptr : vector_sum_col.info(), + gemm_info.b_offset == 0 ? nullptr : vector_sum_row.info(), bias.info(), dst_multipliers.info(), dst_shifts.info())) + { + std::cerr << "Invalid arguments for CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel."
<< std::endl; + return false; + } + + // Configure function + gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info, gemm_info.a_offset == 0 ? nullptr : &vector_sum_col, gemm_info.b_offset == 0 ? nullptr : &vector_sum_row, &bias, &dst_multipliers, &dst_shifts); + + // Allocate tensors + lhs.allocator()->allocate(); + rhs.allocator()->allocate(); + rhs_reshaped.allocator()->allocate(); + bias.allocator()->allocate(); + dst.allocator()->allocate(); + vector_sum_col.allocator()->allocate(); + vector_sum_row.allocator()->allocate(); + dst_multipliers.allocator()->allocate(); + dst_shifts.allocator()->allocate(); + + return true; + } + void do_run() override + { + if(mtx_a_reduction != nullptr) + { + mtx_a_reduction->run(); + } + gemm.run(); + + // Make sure all the OpenCL jobs are done executing: + CLScheduler::get().sync(); + } + + void do_teardown() override + { + } + +private: + CLTensor lhs{}; + CLTensor rhs{}; + CLTensor rhs_reshaped{}; + CLTensor bias{}; + CLTensor dst{}; + CLTensor vector_sum_col{}; + CLTensor vector_sum_row{}; + CLTensor dst_multipliers{}; + CLTensor dst_shifts{}; + CLTuner tuner{}; + CLGEMMLowpMatrixMultiplyReshapedOnlyRHS gemm{}; + std::unique_ptr mtx_a_reduction{ nullptr }; +}; + +/** Main test program for gemmlowp reshaped rhs only with fused output stage fixedpoint + * + * @param[in] argc Number of arguments + * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] h0, [optional] interleave_rhs, [optional] transpose_rhs ) + */ +int main(int argc, char **argv) +{ + return run_example(argc, argv); +} -- cgit v1.2.1