authorGian Marco Iodice <gianmarco.iodice@arm.com>2023-08-04 15:26:41 +0100
committerViet-Hoa Do <viet-hoa.do@arm.com>2023-08-07 08:42:14 +0000
commit78ce2730ecd2f1e666cdd10263bf054c0b740a9c (patch)
treed7c6f35a87c2f417299fde5441dd622cedceca95
parent4f76a00a40947b9e3549c18d319cf057c6f0271e (diff)
downloadComputeLibrary-78ce2730ecd2f1e666cdd10263bf054c0b740a9c.tar.gz
Document the Conv2D heuristic
- Add a new section in the documentation to describe how the conv2D heuristic works on Arm® Cortex®-based CPUs and Arm® Mali™-based GPUs
- Add CKW_UNUSED in compute_kernel_writer/src/cl/CLTile.cpp to avoid the compilation error due to an unused variable
- Remove FFT from the list of algorithms to be selected by the CPU Conv2d heuristic.

Resolves COMPMID-6163

Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Change-Id: I51384d7749451b2562642683e8b2429a355166bb
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10065
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--compute_kernel_writer/src/cl/CLTile.cpp1
-rw-r--r--docs/Doxyfile1
-rw-r--r--docs/DoxygenLayout.xml1
-rw-r--r--docs/user_guide/conv2d_heuristic.dox89
-rw-r--r--src/cpu/operators/CpuConv2d.cpp6
5 files changed, 93 insertions, 5 deletions
diff --git a/compute_kernel_writer/src/cl/CLTile.cpp b/compute_kernel_writer/src/cl/CLTile.cpp
index c6cf47d831..013ac4c276 100644
--- a/compute_kernel_writer/src/cl/CLTile.cpp
+++ b/compute_kernel_writer/src/cl/CLTile.cpp
@@ -224,6 +224,7 @@ std::vector<int32_t> CLTile::supported_vector_lengths() const
void CLTile::validate_tile_info(const TileInfo &info) const
{
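+ // 'info' is only consumed by the assertions below; CKW_UNUSED marks it as used to avoid an unused-variable error when the assertions are compiled out.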
+ CKW_UNUSED(info);
CKW_ASSERT_MSG(cl_validate_vector_length(info.width()), "Unsupported TileInfo width");
CKW_ASSERT_MSG(info.data_type() != DataType::Unknown, "DataType::Unknown is not supported");
}
diff --git a/docs/Doxyfile b/docs/Doxyfile
index 3a78cdf93f..186f66c086 100644
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -773,6 +773,7 @@ INPUT = ./docs/user_guide/introduction.dox \
./docs/user_guide/library.dox \
./docs/user_guide/data_type.dox \
./docs/user_guide/data_layout.dox \
+ ./docs/user_guide/conv2d_heuristic.dox \
./docs/user_guide/operator_list.dox \
./docs/user_guide/tests.dox \
./docs/user_guide/advanced.dox \
diff --git a/docs/DoxygenLayout.xml b/docs/DoxygenLayout.xml
index fb42ba0535..4e09e20e3d 100644
--- a/docs/DoxygenLayout.xml
+++ b/docs/DoxygenLayout.xml
@@ -8,6 +8,7 @@
<tab type="user" url="@ref architecture" title="Library Architecture"/>
<tab type="user" url="@ref data_type_support" title="Data Type Support"/>
<tab type="user" url="@ref data_layout_support" title="Data Layout Support"/>
+ <tab type="user" url="@ref conv2d_heuristic" title="Convolution 2D heuristic"/>
<tab type="user" url="@ref operators_list" title="Operator List"/>
<tab type="user" url="@ref tests" title="Validation and benchmarks"/>
<tab type="user" url="@ref advanced" title="Advanced"/>
diff --git a/docs/user_guide/conv2d_heuristic.dox b/docs/user_guide/conv2d_heuristic.dox
new file mode 100644
index 0000000000..edd24a3d36
--- /dev/null
+++ b/docs/user_guide/conv2d_heuristic.dox
@@ -0,0 +1,89 @@
+///
+/// Copyright (c) 2023 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+
+namespace arm_compute
+{
+/**
+@page conv2d_heuristic Convolution 2D heuristic
+
+@section conv2d_heuristic_algorithms_used Convolution 2D heuristic: algorithm selection
+
+The 2D convolution (conv2D for short) is one of the most compute-intensive and performance-critical operators in ML workloads.
+This operator can be implemented with different algorithms, which differ in terms of accuracy, kernel size support, and additional memory required.
+Unfortunately, no single algorithm can be used in all scenarios to achieve the best performance.
+Therefore, the Arm Compute Library integrates a heuristic within the conv2d operators to select the most efficient algorithm, depending on the input and kernel shapes and the desired level of accuracy.
+The heuristic depends on the target backend (either NEON™ for Arm® CPUs or OpenCL for Arm® GPUs), and the following subsections provide the main details behind the selection of the algorithm.
+
+⚠ Attention: The heuristics presented in the following subsections will only refer to the NHWC data layout, which is the optimal and recommended layout for the Arm Compute Library.
+
+@subsection conv2d_heuristic_on_cpu Convolution 2D heuristic: Arm® Cortex®-based CPUs
+
+The conv2d heuristic for Arm® Cortex®-based CPUs is implemented in the get_convolution_method() method of the CpuConv2d operator.
+The algorithms that get_convolution_method() can select are the following:
+- Direct-Conv2D
+- Im2Col+GeMM-based
+- Indirect-GeMM (a.k.a. GEMMCONV2D)
+- GeMM
+- Winograd
+
+⚠ Attention: Winograd only works with floating-point data types (F32, F16)
+
+The heuristic first checks the less frequent cases that we may encounter in ML workloads for edge devices. These cases are the following:
+-# Non-unit dilation: we call Im2Col+GeMM
+-# Large input and kernel shapes: we call Direct-Conv2D because it is the only algorithm that does not require additional temporary memory
+-# Small Input Feature Maps (IFM): in this scenario, we have found that the GeMM implementation is generally more efficient than Winograd and Indirect-GeMM
+
+In the more frequent cases, such as unit dilation and a larger IFM, we evaluate the following conditions instead:
+-# Unit kernel size (1x1): in this scenario, the conv2d operation corresponds to a matrix multiplication and we call GeMM.
+-# Winograd: it only works with unit strides and supports a limited set of kernel sizes, such as 3x3, 3x1, 1x3, 5x1, 1x5 and 5x5
+-# Indirect-GeMM: it should be used in all cases except when the kernel size is 1x1 or when the IFM is small
+
+If none of the preceding conditions is met, we fall back to the Im2Col+GeMM-based algorithm, as summarized in the sketch below.
+
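+The selection order described above can be summarized with the following simplified sketch.
+This snippet is purely illustrative: the Conv2dQuery struct and the select_cpu_conv2d() function are invented for this page and are not part of the library API; the authoritative logic lives in CpuConv2d::get_convolution_method().
+
+@code{.cpp}
+// Hypothetical, simplified view of the CPU conv2d algorithm selection (not the library implementation).
+enum class Conv2dAlgorithm
+{
+    Im2ColGemm,   // Im2Col+GeMM-based
+    Direct,       // Direct-Conv2D
+    Gemm,         // GeMM
+    Winograd,     // Winograd
+    IndirectGemm  // Indirect-GeMM
+};
+
+struct Conv2dQuery // invented for illustration only
+{
+    int  kernel_w{ 0 }, kernel_h{ 0 };     // kernel width and height
+    int  stride_x{ 1 }, stride_y{ 1 };     // convolution strides
+    int  dilation_x{ 1 }, dilation_y{ 1 }; // dilation rates
+    int  ifm{ 0 };                         // number of Input Feature Maps (channels)
+    bool shapes_are_large{ false };        // "large input and kernel shapes" condition
+    bool is_float{ true };                 // F32 / F16
+};
+
+inline Conv2dAlgorithm select_cpu_conv2d(const Conv2dQuery &q)
+{
+    // 1) Less frequent cases are checked first
+    if(q.dilation_x != 1 || q.dilation_y != 1)
+    {
+        return Conv2dAlgorithm::Im2ColGemm; // non-unit dilation
+    }
+    if(q.shapes_are_large)
+    {
+        return Conv2dAlgorithm::Direct; // no additional temporary memory required
+    }
+    if(q.ifm < 16)
+    {
+        return Conv2dAlgorithm::Gemm; // small IFM (the library heuristic uses a 16-channel threshold)
+    }
+
+    // 2) Most frequent cases
+    if(q.kernel_w == 1 && q.kernel_h == 1)
+    {
+        return Conv2dAlgorithm::Gemm; // a 1x1 conv2d is a matrix multiplication
+    }
+    const bool winograd_kernel = (q.kernel_w == 3 && q.kernel_h == 3) || (q.kernel_w == 5 && q.kernel_h == 5) ||
+                                 (q.kernel_w == 3 && q.kernel_h == 1) || (q.kernel_w == 1 && q.kernel_h == 3) ||
+                                 (q.kernel_w == 5 && q.kernel_h == 1) || (q.kernel_w == 1 && q.kernel_h == 5);
+    if(q.is_float && winograd_kernel && q.stride_x == 1 && q.stride_y == 1)
+    {
+        return Conv2dAlgorithm::Winograd;
+    }
+
+    // Indirect-GeMM in the remaining cases; Im2Col+GeMM stays as the final fall-back
+    // when the Indirect-GeMM kernel cannot be used.
+    return Conv2dAlgorithm::IndirectGemm;
+}
+@endcode
+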
+@subsection conv2d_heuristic_on_gpu Convolution 2D heuristic: Arm® Mali™-based GPUs
+
+The conv2d heuristic for Arm® Mali™-based GPUs is implemented in the get_convolution_method() method of the ClConv2d operator.
+
+The algorithms used in the get_convolution_method() function are the following:
+- Direct-Conv2D
+- Im2Col+GeMM-based
+- Indirect-GeMM
+- GeMM
+- Winograd
+
+⚠ Attention: Winograd only works with floating-point data types (F32, F16)
+
+The heuristic first checks the less frequent cases that we may encounter in ML workloads for edge devices. These cases are the following:
+-# Non-unit dilation: we call Im2Col+GeMM
+-# Large input and kernel shapes: we call Direct-Conv2D because it is the only algorithm that does not require additional temporary memory
+
+In all the other cases, the GPU heuristic evaluates the suitability of Winograd and Direct-Conv2D/Indirect-Conv2D.
+In particular, Winograd is adopted when the convolution parameters (kernel size and strides) are supported by the algorithm and when the IFM is not small (for example, greater than 8).
+There are several conditions for using the Direct-Conv2D algorithm, and we recommend looking at the heuristic directly.
+In general, the Direct-Conv2D algorithm is used in almost all cases where the kernel size is not 1x1.
+The Indirect-GeMM algorithm is used as an alternative to Direct-Conv2D only on the Arm® Mali™-G77 GPU.
+If neither Winograd nor Direct-Conv2D can be used, we fall back to either GeMM (when the kernel size is 1x1) or the Im2Col+GeMM-based algorithm, as summarized in the sketch below.
+
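+The following simplified sketch summarizes the GPU selection order described above.
+It is purely illustrative: the boolean parameters condense the Winograd and Direct-Conv2D support checks mentioned above, and select_gpu_conv2d() is not part of the library API; the authoritative logic lives in ClConv2d::get_convolution_method().
+
+@code{.cpp}
+// Hypothetical, simplified view of the GPU conv2d algorithm selection (not the library implementation).
+enum class GpuConv2dAlgorithm
+{
+    Im2ColGemm,   // Im2Col+GeMM-based
+    Direct,       // Direct-Conv2D
+    Gemm,         // GeMM
+    Winograd,     // Winograd
+    IndirectGemm  // Indirect-GeMM
+};
+
+inline GpuConv2dAlgorithm select_gpu_conv2d(int kernel_w, int kernel_h,     // kernel size
+                                            int dilation_x, int dilation_y, // dilation rates
+                                            int ifm,                        // number of Input Feature Maps
+                                            bool shapes_are_large,          // "large input and kernel shapes"
+                                            bool winograd_params_supported, // kernel size and strides supported by Winograd
+                                            bool direct_params_supported,   // conditions for Direct-Conv2D are met
+                                            bool is_mali_g77)               // Arm® Mali™-G77 target
+{
+    // Less frequent cases are checked first
+    if(dilation_x != 1 || dilation_y != 1)
+    {
+        return GpuConv2dAlgorithm::Im2ColGemm; // non-unit dilation
+    }
+    if(shapes_are_large)
+    {
+        return GpuConv2dAlgorithm::Direct; // no additional temporary memory required
+    }
+
+    // Winograd when the convolution parameters are supported and the IFM is not small (e.g. greater than 8)
+    if(winograd_params_supported && ifm > 8)
+    {
+        return GpuConv2dAlgorithm::Winograd;
+    }
+
+    // Direct-Conv2D (or Indirect-GeMM on Arm® Mali™-G77) in almost all cases where the kernel is not 1x1
+    if(direct_params_supported && !(kernel_w == 1 && kernel_h == 1))
+    {
+        return is_mali_g77 ? GpuConv2dAlgorithm::IndirectGemm : GpuConv2dAlgorithm::Direct;
+    }
+
+    // Otherwise: GeMM for 1x1 kernels, Im2Col+GeMM-based algorithm as the final fall-back
+    return (kernel_w == 1 && kernel_h == 1) ? GpuConv2dAlgorithm::Gemm : GpuConv2dAlgorithm::Im2ColGemm;
+}
+@endcode
+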
+*/
+} // namespace
diff --git a/src/cpu/operators/CpuConv2d.cpp b/src/cpu/operators/CpuConv2d.cpp
index fa8a7a185c..447b740989 100644
--- a/src/cpu/operators/CpuConv2d.cpp
+++ b/src/cpu/operators/CpuConv2d.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -178,10 +178,6 @@ ConvolutionMethod CpuConv2d::get_convolution_method(const ITensorInfo *input, co
{
return ConvolutionMethod::DIRECT;
}
- if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (NEFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
- {
- return ConvolutionMethod::FFT;
- }
if(input->dimension(idx_c) < 16)
{
return ConvolutionMethod::GEMM;