aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSheri Zhang <sheri.zhang@arm.com>2021-04-30 16:53:41 +0100
committerSang-Hoon Park <sang-hoon.park@arm.com>2021-05-05 09:40:36 +0000
commitd813bab10bb4fe954fa0e962e1402ed1377617da (patch)
tree6e107f6788fc7f396087e8efa29161bfeb2099cc
parent6124ce60b54eb5639ed19d46c79fce21cca2c83b (diff)
downloadComputeLibrary-d813bab10bb4fe954fa0e962e1402ed1377617da.tar.gz
Restructure documentation
The documentation has been restructured for better grouping and readability. Resolves: COMPMID-4198 Signed-off-by: Sheri Zhang <sheri.zhang@arm.com> Change-Id: I8c8bc77f0aab8d63f1659f2235dbab634422a68c Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5568 Tested-by: Georgios Pinitas <georgios.pinitas@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--docs/Doxyfile26
-rw-r--r--docs/DoxygenLayout.xml212
-rw-r--r--docs/contributor_guide/adding_operator.dox (renamed from docs/04_adding_operator.dox)4
-rw-r--r--docs/contributor_guide/contribution_guidelines.dox (renamed from docs/05_contribution_guidelines.dox)2
-rw-r--r--docs/contributor_guide/implementation_topics.dox143
-rw-r--r--docs/user_guide/advanced.dox114
-rw-r--r--docs/user_guide/api.dox (renamed from docs/08_api.dox)0
-rw-r--r--docs/user_guide/data_layout.dox41
-rw-r--r--docs/user_guide/data_type.dox47
-rw-r--r--docs/user_guide/errata.dox (renamed from docs/07_errata.dox)0
-rw-r--r--docs/user_guide/how_to_build_and_run_examples.dox541
-rw-r--r--docs/user_guide/introduction.dox74
-rw-r--r--docs/user_guide/library.dox (renamed from docs/01_library.dox)171
-rw-r--r--docs/user_guide/programming_model.dox70
-rw-r--r--docs/user_guide/release_version_and_change_log.dox (renamed from docs/00_introduction.dox)649
-rw-r--r--docs/user_guide/tests.dox (renamed from docs/02_tests.dox)2
16 files changed, 1268 insertions, 828 deletions
diff --git a/docs/Doxyfile b/docs/Doxyfile
index 6fb5de7020..5a76c0538f 100644
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -687,7 +687,7 @@ FILE_VERSION_FILTER =
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
-LAYOUT_FILE =
+LAYOUT_FILE = ./docs/DoxygenLayout.xml
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
@@ -768,16 +768,20 @@ WARN_LOGFILE =
# spaces.
# Note: If this tag is empty the current directory is searched.
-INPUT = ./docs/00_introduction.dox \
- ./docs/01_library.dox \
- ./docs/02_tests.dox \
- ./docs/03_scripts.dox \
- ./docs/04_adding_operator.dox \
- ./docs/05_contribution_guidelines.dox \
- ./docs/06_functions_list.dox \
- ./docs/07_errata.dox \
- ./docs/08_api.dox \
- ./docs/09_operators_list.dox \
+INPUT = ./docs/user_guide/introduction.dox \
+ ./docs/user_guide/how_to_build_and_run_examples.dox \
+ ./docs/user_guide/library.dox \
+ ./docs/user_guide/programming_model.dox \
+ ./docs/user_guide/api.dox \
+ ./docs/user_guide/data_type.dox \
+ ./docs/user_guide/data_layout.dox \
+ ./docs/user_guide/tests.dox \
+ ./docs/user_guide/advanced.dox \
+ ./docs/user_guide/release_version_and_change_log.dox \
+ ./docs/user_guide/errata.dox \
+ ./docs/contributor_guide/contribution_guidelines.dox \
+ ./docs/contributor_guide/adding_operator.dox \
+ ./docs/contributor_guide/implementation_topics.dox \
./docs/ComputeLibrary.dir \
./arm_compute/ \
./src/ \
diff --git a/docs/DoxygenLayout.xml b/docs/DoxygenLayout.xml
new file mode 100644
index 0000000000..fe3844b60d
--- /dev/null
+++ b/docs/DoxygenLayout.xml
@@ -0,0 +1,212 @@
+<doxygenlayout version="1.0">
+ <!-- Generated by doxygen 1.8.15 -->
+ <!-- Navigation index tabs for HTML output -->
+ <navindex>
+ <tab type="usergroup" title="User Guide" url="[none]">
+ <tab type="user" url="@ref introduction" title="Introduction"/>
+ <tab type="user" url="@ref how_to_build" title="How to Build and Run Examples"/>
+ <tab type="user" url="@ref architecture" title="Library Architecture"/>
+ <tab type="user" url="@ref programming_model" title="Programming Model"/>
+ <tab type="user" url="@ref api" title="Application Programming Interface"/>
+ <tab type="user" url="@ref data_type_support" title="Data Type Support"/>
+ <tab type="user" url="@ref data_layout_support" title="Data Layout Support"/>
+ <tab type="user" url="@ref tests" title="Validation and benchmarks"/>
+ <tab type="user" url="@ref advanced" title="Advanced"/>
+ <tab type="user" url="@ref versions_changelogs" title="Release Versions and Changelog"/>
+ <tab type="user" url="@ref errata" title="Errata"/>
+ </tab>
+ <tab type="usergroup" title="Contributor Guide" url="[none]">
+ <tab type="user" url="@ref contribution_guidelines" title="Contribution Guidelines"/>
+ <tab type="user" url="@ref adding_operator" title="How to Add a New Operator"/>
+ <tab type="user" url="@ref implementation_topic" title="Implementation Topics"/>
+ </tab>
+ <tab type="mainpage" visible="no" title=""/>
+ <tab type="pages" visible="no" title="" intro=""/>
+ <tab type="modules" visible="yes" title="" intro=""/>
+ <tab type="namespaces" visible="yes" title="">
+ <tab type="namespacelist" visible="yes" title="" intro=""/>
+ <tab type="namespacemembers" visible="yes" title="" intro=""/>
+ </tab>
+ <tab type="classes" visible="yes" title="">
+ <tab type="classlist" visible="yes" title="" intro=""/>
+ <tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/>
+ <tab type="hierarchy" visible="yes" title="" intro=""/>
+ <tab type="classmembers" visible="yes" title="" intro=""/>
+ </tab>
+ <tab type="files" visible="yes" title="">
+ <tab type="filelist" visible="yes" title="" intro=""/>
+ <tab type="globals" visible="yes" title="" intro=""/>
+ </tab>
+ <tab type="examples" visible="yes" title="" intro=""/>
+ </navindex>
+
+ <!-- Layout definition for a class page -->
+ <class>
+ <briefdescription visible="yes"/>
+ <includes visible="$SHOW_INCLUDE_FILES"/>
+ <inheritancegraph visible="$CLASS_GRAPH"/>
+ <collaborationgraph visible="$COLLABORATION_GRAPH"/>
+ <memberdecl>
+ <nestedclasses visible="yes" title=""/>
+ <publictypes title=""/>
+ <services title=""/>
+ <interfaces title=""/>
+ <publicslots title=""/>
+ <signals title=""/>
+ <publicmethods title=""/>
+ <publicstaticmethods title=""/>
+ <publicattributes title=""/>
+ <publicstaticattributes title=""/>
+ <protectedtypes title=""/>
+ <protectedslots title=""/>
+ <protectedmethods title=""/>
+ <protectedstaticmethods title=""/>
+ <protectedattributes title=""/>
+ <protectedstaticattributes title=""/>
+ <packagetypes title=""/>
+ <packagemethods title=""/>
+ <packagestaticmethods title=""/>
+ <packageattributes title=""/>
+ <packagestaticattributes title=""/>
+ <properties title=""/>
+ <events title=""/>
+ <privatetypes title=""/>
+ <privateslots title=""/>
+ <privatemethods title=""/>
+ <privatestaticmethods title=""/>
+ <privateattributes title=""/>
+ <privatestaticattributes title=""/>
+ <friends title=""/>
+ <related title="" subtitle=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ <memberdef>
+ <inlineclasses title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <services title=""/>
+ <interfaces title=""/>
+ <constructors title=""/>
+ <functions title=""/>
+ <related title=""/>
+ <variables title=""/>
+ <properties title=""/>
+ <events title=""/>
+ </memberdef>
+ <allmemberslink visible="yes"/>
+ <usedfiles visible="$SHOW_USED_FILES"/>
+ <authorsection visible="yes"/>
+ </class>
+
+ <!-- Layout definition for a namespace page -->
+ <namespace>
+ <briefdescription visible="yes"/>
+ <memberdecl>
+ <nestednamespaces visible="yes" title=""/>
+ <constantgroups visible="yes" title=""/>
+ <classes visible="yes" title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ <memberdef>
+ <inlineclasses title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection visible="yes"/>
+ </namespace>
+
+ <!-- Layout definition for a file page -->
+ <file>
+ <briefdescription visible="yes"/>
+ <includes visible="$SHOW_INCLUDE_FILES"/>
+ <includegraph visible="$INCLUDE_GRAPH"/>
+ <includedbygraph visible="$INCLUDED_BY_GRAPH"/>
+ <sourcelink visible="yes"/>
+ <memberdecl>
+ <classes visible="yes" title=""/>
+ <namespaces visible="yes" title=""/>
+ <constantgroups visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ <memberdef>
+ <inlineclasses title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection/>
+ </file>
+
+ <!-- Layout definition for a group page -->
+ <group>
+ <briefdescription visible="yes"/>
+ <groupgraph visible="$GROUP_GRAPHS"/>
+ <memberdecl>
+ <nestedgroups visible="yes" title=""/>
+ <dirs visible="yes" title=""/>
+ <files visible="yes" title=""/>
+ <namespaces visible="yes" title=""/>
+ <classes visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <signals title=""/>
+ <publicslots title=""/>
+ <protectedslots title=""/>
+ <privateslots title=""/>
+ <events title=""/>
+ <properties title=""/>
+ <friends title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ <memberdef>
+ <pagedocs/>
+ <inlineclasses title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <signals title=""/>
+ <publicslots title=""/>
+ <protectedslots title=""/>
+ <privateslots title=""/>
+ <events title=""/>
+ <properties title=""/>
+ <friends title=""/>
+ </memberdef>
+ <authorsection visible="yes"/>
+ </group>
+
+ <!-- Layout definition for a directory page -->
+ <directory>
+ <briefdescription visible="yes"/>
+ <directorygraph visible="yes"/>
+ <memberdecl>
+ <dirs visible="yes"/>
+ <files visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ </directory>
+</doxygenlayout>
diff --git a/docs/04_adding_operator.dox b/docs/contributor_guide/adding_operator.dox
index aef1bb4af0..697cddb235 100644
--- a/docs/04_adding_operator.dox
+++ b/docs/contributor_guide/adding_operator.dox
@@ -25,10 +25,12 @@
namespace arm_compute
{
/**
-@page add_operator Adding new operators
+@page adding_operator How to Add a New Operator
@tableofcontents
+@section S4_0_introduction Adding new operators
+
@section S4_1_introduction Introduction
In Compute Library there are two main parts or modules:
- The core library consists of a low-level collection of algorithms implemented in C++ and optimized for Arm CPUs and GPUs. The core module is designed to be embedded in other projects and it doesn't perform any memory management or scheduling.
diff --git a/docs/05_contribution_guidelines.dox b/docs/contributor_guide/contribution_guidelines.dox
index 35b9f49dbc..9d854136bd 100644
--- a/docs/05_contribution_guidelines.dox
+++ b/docs/contributor_guide/contribution_guidelines.dox
@@ -24,7 +24,7 @@
namespace arm_compute
{
/**
-@page contribution_guidelines Contribution guidelines
+@page contribution_guidelines Contribution Guidelines
@tableofcontents
diff --git a/docs/contributor_guide/implementation_topics.dox b/docs/contributor_guide/implementation_topics.dox
new file mode 100644
index 0000000000..4afaa6d6a1
--- /dev/null
+++ b/docs/contributor_guide/implementation_topics.dox
@@ -0,0 +1,143 @@
+///
+/// Copyright (c) 2017-2021 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+namespace arm_compute
+{
+/** @page implementation_topic Implementation Topics
+
+@section implementation_topic_windows Windows
+
+A @ref Window represents a workload to execute, it can handle up to @ref Coordinates::num_max_dimensions dimensions.
+Each dimension is defined by a start, end and step.
+
+It can split into subwindows as long as *all* the following rules remain true for all the dimensions:
+
+- max[n].start() <= sub[n].start() < max[n].end()
+- sub[n].start() < sub[n].end() <= max[n].end()
+- max[n].step() == sub[n].step()
+- (sub[n].start() - max[n].start()) % max[n].step() == 0
+- (sub[n].end() - sub[n].start()) % max[n].step() == 0
+
+@section implementation_topic_kernels Kernels
+
+Each implementation of the @ref IKernel interface (base class of all the kernels in the core library) works in the same way:
+
+OpenCL kernels:
+
+@code{.cpp}
+// Initialize the CLScheduler with the default context and default command queue
+// Implicitly initializes the CLKernelLibrary to use ./cl_kernels as location for OpenCL kernels files and sets a default device for which OpenCL programs are built.
+CLScheduler::get().default_init();
+
+cl::CommandQueue q = CLScheduler::get().queue();
+//Create a kernel object:
+MyKernel kernel;
+// Initialize the kernel with the input/output and options you want to use:
+kernel.configure( input, output, option0, option1);
+// Retrieve the execution window of the kernel:
+const Window& max_window = kernel.window();
+// Run the whole kernel in the current thread:
+kernel.run( q, max_window ); // Enqueue the kernel to process the full window on the default queue
+
+// Wait for the processing to complete:
+q.finish();
+@endcode
+
+Neon / CPP kernels:
+
+@code{.cpp}
+//Create a kernel object:
+MyKernel kernel;
+// Initialize the kernel with the input/output and options you want to use:
+kernel.configure( input, output, option0, option1);
+// Retrieve the execution window of the kernel:
+const Window& max_window = kernel.window();
+// Run the whole kernel in the current thread:
+kernel.run( max_window ); // Run the kernel on the full window
+@endcode
+
+@section implementation_topic_multithreading Multi-threading
+
+The previous section shows how to run an Arm® Neon™ / CPP kernel in the current thread, however if your system has several CPU cores, you will probably want the kernel to use several cores. Here is how this can be done:
+
+@code{.cpp}
+ ThreadInfo info;
+ info.cpu_info = &_cpu_info;
+
+ const Window &max_window = kernel->window();
+ const unsigned int num_iterations = max_window.num_iterations(split_dimension);
+ info.num_threads = std::min(num_iterations, _num_threads);
+
+ if(num_iterations == 0)
+ {
+ return;
+ }
+
+ if(!kernel->is_parallelisable() || info.num_threads == 1)
+ {
+ kernel->run(max_window, info);
+ }
+ else
+ {
+ int t = 0;
+ auto thread_it = _threads.begin();
+
+ for(; t < info.num_threads - 1; ++t, ++thread_it)
+ {
+ Window win = max_window.split_window(split_dimension, t, info.num_threads);
+ info.thread_id = t;
+ thread_it->start(kernel, win, info);
+ }
+
+ // Run last part on main thread
+ Window win = max_window.split_window(split_dimension, t, info.num_threads);
+ info.thread_id = t;
+ kernel->run(win, info);
+
+ try
+ {
+ for(auto &thread : _threads)
+ {
+ thread.wait();
+ }
+ }
+ catch(const std::system_error &e)
+ {
+ std::cerr << "Caught system_error with code " << e.code() << " meaning " << e.what() << '\n';
+ }
+ }
+@endcode
+
+This is a very basic implementation which was originally used in the Arm® Neon™ runtime library by all the Arm® Neon™ functions.
+
+@sa CPPScheduler
+
+@note Some kernels need some local temporary buffer to perform their calculations. In order to avoid memory corruption between threads, the local buffer must be of size: ```memory_needed_per_thread * num_threads``` and a unique thread_id between 0 and num_threads must be assigned to the @ref ThreadInfo object passed to the ```run``` function.
+
+
+@section implementation_topic_cl_scheduler OpenCL kernel library
+
+All OpenCL kernels used by the library are built and stored in @ref CLKernelLibrary.
+If the library is compiled with embed_kernels=0 the application can set the path to the OpenCL kernels by calling @ref CLKernelLibrary::init(), by default the path is set to "./cl_kernels"
+*/
+} // namespace arm_compute \ No newline at end of file
diff --git a/docs/user_guide/advanced.dox b/docs/user_guide/advanced.dox
new file mode 100644
index 0000000000..86ee2ce756
--- /dev/null
+++ b/docs/user_guide/advanced.dox
@@ -0,0 +1,114 @@
+///
+/// Copyright (c) 2017-2021 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+namespace arm_compute
+{
+/** @page advanced Advanced
+
+@tableofcontents
+
+@section S1_8_cl_tuner OpenCL Tuner
+
+The OpenCL tuner, a.k.a. CLTuner, is a module of Arm Compute Library that can improve the performance of the OpenCL kernels tuning the Local-Workgroup-Size (LWS).
+The optimal LWS for each unique OpenCL kernel configuration is stored in a table. This table can be either imported or exported from/to a file.
+The OpenCL tuner runs the same OpenCL kernel for a range of local workgroup sizes and keeps the local workgroup size of the fastest run to use in subsequent calls to the kernel. It supports three modes of tuning with different trade-offs between the time taken to tune and the kernel execution time achieved using the best LWS found. In the Exhaustive mode, it searches all the supported values of LWS. This mode takes the longest time to tune and is the most likely to find the optimal LWS. Normal mode searches a subset of LWS values to yield a good approximation of the optimal LWS. It takes less time to tune than Exhaustive mode. Rapid mode takes the shortest time to tune and finds an LWS value that is at least as good or better than the default LWS value. The mode affects only the search for the optimal LWS and has no effect when the LWS value is imported from a file.
+In order for the performance numbers to be meaningful you must disable the GPU power management and set it to a fixed frequency for the entire duration of the tuning phase.
+
+If you wish to know more about LWS and the important role on improving the GPU cache utilization, we suggest having a look at the presentation "Even Faster CNNs: Exploring the New Class of Winograd Algorithms" available at the following link:
+
+https://www.embedded-vision.com/platinum-members/arm/embedded-vision-training/videos/pages/may-2018-embedded-vision-summit-iodice
+
+Tuning a network from scratch can be long and affect considerably the execution time for the first run of your network. It is recommended for this reason to store the CLTuner's result in a file to amortize this time when you either re-use the same network or the functions with the same configurations. The tuning is performed only once for each OpenCL kernel.
+
+CLTuner looks for the optimal LWS for each unique OpenCL kernel configuration. Since a function (i.e. Convolution Layer, Pooling Layer, Fully Connected Layer ...) can be called multiple times but with different parameters, we associate an "id" (called "config_id") to each kernel to distinguish the unique configurations.
+
+ #Example: 2 unique Matrix Multiply configurations
+@code{.cpp}
+ TensorShape a0 = TensorShape(32,32);
+ TensorShape b0 = TensorShape(32,32);
+ TensorShape c0 = TensorShape(32,32);
+ TensorShape a1 = TensorShape(64,64);
+ TensorShape b1 = TensorShape(64,64);
+ TensorShape c1 = TensorShape(64,64);
+
+ Tensor a0_tensor;
+ Tensor b0_tensor;
+ Tensor c0_tensor;
+ Tensor a1_tensor;
+ Tensor b1_tensor;
+ Tensor c1_tensor;
+
+ a0_tensor.allocator()->init(TensorInfo(a0, 1, DataType::F32));
+ b0_tensor.allocator()->init(TensorInfo(b0, 1, DataType::F32));
+ c0_tensor.allocator()->init(TensorInfo(c0, 1, DataType::F32));
+ a1_tensor.allocator()->init(TensorInfo(a1, 1, DataType::F32));
+ b1_tensor.allocator()->init(TensorInfo(b1, 1, DataType::F32));
+    c1_tensor.allocator()->init(TensorInfo(c1, 1, DataType::F32));
+
+ CLGEMM gemm0;
+ CLGEMM gemm1;
+
+ // Configuration 0
+ gemm0.configure(&a0, &b0, nullptr, &c0, 1.0f, 0.0f);
+
+ // Configuration 1
+ gemm1.configure(&a1, &b1, nullptr, &c1, 1.0f, 0.0f);
+@endcode
+
+@subsection S1_8_1_cl_tuner_how_to How to use it
+
+All the graph examples in the Compute Library's folder "examples" and the arm_compute_benchmark accept an argument to enable the OpenCL tuner and an argument to export/import the LWS values to/from a file
+
+ #Enable CL tuner
+    ./graph_mobilenet --enable-tuner --target=CL
+ ./arm_compute_benchmark --enable-tuner
+
+ #Export/Import to/from a file
+ ./graph_mobilenet --enable-tuner --target=CL --tuner-file=acl_tuner.csv
+ ./arm_compute_benchmark --enable-tuner --tuner-file=acl_tuner.csv
+
+If you are importing the CLTuner's results from a file, the new tuned LWS values will be appended to it.
+
+Whether you are benchmarking the graph examples or the test cases in the arm_compute_benchmark, remember to:
+
+ -# Disable the power management
+ -# Keep the GPU frequency constant
+    -# Run the network multiple times (e.g. 10).
+
+If you are not using the graph API or the benchmark infrastructure you will need to manually pass a CLTuner object to CLScheduler before configuring any function.
+
+@code{.cpp}
+CLTuner tuner;
+
+// Setup Scheduler
+CLScheduler::get().default_init(&tuner);
+@endcode
+
+After the first run, the CLTuner's results can be exported to a file using the method "save_to_file()".
+- tuner.save_to_file("results.csv");
+
+This file can be also imported using the method "load_from_file("results.csv")".
+- tuner.load_from_file("results.csv");
+
+*/
+} // namespace \ No newline at end of file
diff --git a/docs/08_api.dox b/docs/user_guide/api.dox
index 39282046a9..39282046a9 100644
--- a/docs/08_api.dox
+++ b/docs/user_guide/api.dox
diff --git a/docs/user_guide/data_layout.dox b/docs/user_guide/data_layout.dox
new file mode 100644
index 0000000000..48f15acd63
--- /dev/null
+++ b/docs/user_guide/data_layout.dox
@@ -0,0 +1,41 @@
+///
+/// Copyright (c) 2021 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+
+namespace arm_compute
+{
+/**
+@page data_layout_support Data Layout Support
+
+@section data_layout_support_supported_data_layout Supported Data Layouts
+
+Compute Library supports the following data layouts and
+the right-most letter represents the fastest changing dimension:
+
+- NHWC: The native layout of Compute Library that delivers the best performance where channels are in the fastest changing dimension
+- NCHW: Legacy layout where width is in the fastest changing dimension
+
+where N = batch, C = channel, H = height, W = width.
+
+*/
+} // namespace
diff --git a/docs/user_guide/data_type.dox b/docs/user_guide/data_type.dox
new file mode 100644
index 0000000000..7083270a07
--- /dev/null
+++ b/docs/user_guide/data_type.dox
@@ -0,0 +1,47 @@
+///
+/// Copyright (c) 2021 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+namespace arm_compute
+{
+/**
+@page data_type_support Data Type Support
+
+@tableofcontents
+
+@section data_type_support_supported_data_type Supported Data Types
+
+Compute Library supports the following list of data types. More detailed information
+can be found from the documentation of each operator since the data types supported
+by each operator vary.
+
+- BFLOAT16: 16-bit non-standard brain floating point
+- QASYMM8: 8-bit unsigned asymmetric quantized
+- QASYMM8_SIGNED: 8-bit signed asymmetric quantized
+- QSYMM8_PER_CHANNEL: 8-bit signed symmetric quantized (Used for the weights)
+- QSYMM8: 8-bit unsigned symmetric quantized
+- QSYMM16: 16-bit unsigned symmetric quantized
+- F32: 32-bit single precision floating point
+- F16: 16-bit half precision floating point
+- S32: 32-bit signed integer
+*/
+} // namespace
diff --git a/docs/07_errata.dox b/docs/user_guide/errata.dox
index 0c8d684017..0c8d684017 100644
--- a/docs/07_errata.dox
+++ b/docs/user_guide/errata.dox
diff --git a/docs/user_guide/how_to_build_and_run_examples.dox b/docs/user_guide/how_to_build_and_run_examples.dox
new file mode 100644
index 0000000000..e57183e891
--- /dev/null
+++ b/docs/user_guide/how_to_build_and_run_examples.dox
@@ -0,0 +1,541 @@
+///
+/// Copyright (c) 2017-2021 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+namespace arm_compute
+{
+/** @page how_to_build How to Build and Run Examples
+
+@tableofcontents
+
+@section S1_1_build_options Build options
+
+scons 2.3 or above is required to build the library.
+To see the build options available simply run ```scons -h```:
+
+ debug: Debug (yes|no)
+ default: False
+
+ asserts: Enable asserts (this flag is forced to 1 for debug=1) (yes|no)
+ default: False
+
+ logging: Logging (this flag is forced to 1 for debug=1) (yes|no)
+ default: False
+
+ arch: Target Architecture (armv7a|arm64-v8a|arm64-v8.2-a|arm64-v8.2-a-sve|arm64-v8.2-a-sve2|x86_32|x86_64|armv8a|armv8.2-a|armv8.2-a-sve|armv8.6-a|armv8.6-a-sve|armv8.6-a-sve2|armv8r64|x86)
+ default: armv7a
+
+ estate: Execution State (auto|32|64)
+ default: auto
+
+ os: Target OS (linux|android|macos|tizen|bare_metal)
+ default: linux
+
+ build: Build type (native|cross_compile|embed_only)
+ default: cross_compile
+
+ examples: Build example programs (yes|no)
+ default: True
+
+ gemm_tuner: Build gemm_tuner programs (yes|no)
+ default: True
+
+ Werror: Enable/disable the -Werror compilation flag (yes|no)
+ default: True
+
+ standalone: Builds the tests as standalone executables, links statically with libgcc, libstdc++ and libarm_compute (yes|no)
+ default: False
+
+ opencl: Enable OpenCL support (yes|no)
+ default: True
+
+ neon: Enable Arm® Neon™ support (yes|no)
+ default: False
+
+ embed_kernels: Embed OpenCL kernels in library binary (yes|no)
+ default: True
+
+ compress_kernels: Compress embedded OpenCL kernels in library binary. Note embed_kernels should be enabled as well (yes|no)
+ default: False
+
+ set_soname: Set the library's soname and shlibversion (requires SCons 2.4 or above) (yes|no)
+ default: False
+
+ openmp: Enable OpenMP backend (yes|no)
+ default: False
+
+ cppthreads: Enable C++11 threads backend (yes|no)
+ default: True
+
+ build_dir: Specify sub-folder for the build ( /path/to/build_dir )
+ default: .
+
+ install_dir: Specify sub-folder for the install ( /path/to/install_dir )
+ default:
+
+ exceptions: Enable/disable C++ exception support (yes|no)
+ default: True
+
+ linker_script: Use an external linker script ( /path/to/linker_script )
+ default:
+
+ custom_options: Custom options that can be used to turn on/off features
+ (all|none|comma-separated list of names)
+ allowed names: disable_mmla_fp
+ default: none
+
+ data_type_support: Enable a list of data types to support
+ (all|none|comma-separated list of names)
+ allowed names: qasymm8 qasymm8_signed qsymm16 fp16 fp32
+ default: all
+
+ toolchain_prefix: Override the toolchain prefix
+ default:
+
+ compiler_prefix: Override the compiler prefix
+ default:
+
+ extra_cxx_flags: Extra CXX flags to be appended to the build command
+ default:
+
+ extra_link_flags: Extra LD flags to be appended to the build command
+ default:
+
+ compiler_cache: Command to prefix to the C and C++ compiler (e.g ccache)
+ default:
+
+ specs_file: Specs file to use
+ default: rdimon.specs
+
+ benchmark_examples: Build benchmark examples programs (yes|no)
+ default: False
+
+ validate_examples: Build validate examples programs (yes|no)
+ default: False
+
+ reference_openmp: Build reference validation with openmp (yes|no)
+ default: True
+
+ validation_tests: Build validation test programs (yes|no)
+ default: False
+
+ benchmark_tests: Build benchmark test programs (yes|no)
+ default: False
+
+ test_filter: Pattern to specify the tests' filenames to be compiled
+ default: *.cpp
+
+ pmu: Enable PMU counters (yes|no)
+ default: False
+
+ mali: Enable Arm® Mali™ hardware counters (yes|no)
+ default: False
+
+ external_tests_dir: Add examples, benchmarks and tests to the tests suite from an external path ( /path/to/external_tests_dir )
+ default:
+
+@b debug / @b asserts:
+ - With debug=1 asserts are enabled, and the library is built with symbols and no optimisations enabled.
+ - With debug=0 and asserts=1: Optimisations are enabled and symbols are removed, however all the asserts are still present (This is about 20% slower than the release build)
+ - With debug=0 and asserts=0: All optimisations are enable and no validation is performed, if the application misuses the library it is likely to result in a crash. (Only use this mode once you are sure your application is working as expected).
+
+@b arch: The x86_32 and x86_64 targets can only be used with neon=0 and opencl=1.
+
+@b os: Choose the operating system you are targeting: Linux, Android or bare metal.
+@note bare metal can only be used for Arm® Neon™ (not OpenCL), only static libraries get built and Neon's multi-threading support is disabled.
+
+@b build: you can either build directly on your device (native) or cross compile from your desktop machine (cross-compile). In both cases make sure the compiler is available in your path.
+
+@note If you want to natively compile for 32bit on a 64bit Arm device running a 64bit OS then you will have to use cross-compile too.
+
+There is also an 'embed_only' option which will generate all the .embed files for the OpenCL kernels. This might be useful if using a different build system to compile the library.
+
+In addition, the option 'compress_kernels' will compress the embedded OpenCL kernel files using zlib and inject them in the library. This is useful for reducing the binary size. Note, this option is only available for Android when 'embed_kernels' is enabled.
+
+@b Werror: If you are compiling using the same toolchains as the ones used in this guide then there shouldn't be any warning and therefore you should be able to keep Werror=1. If with a different compiler version the library fails to build because of warnings interpreted as errors then, if you are sure the warnings are not important, you might want to try to build with Werror=0 (But please do report the issue on Github).
+
+@b opencl / @b neon: Choose which SIMD technology you want to target. (Neon for Arm Cortex-A CPUs or OpenCL for Arm® Mali™ GPUs)
+
+@b embed_kernels: For OpenCL only: set embed_kernels=1 if you want the OpenCL kernels to be built in the library's binaries instead of being read from separate ".cl" / ".cs" files. If embed_kernels is set to 0 then the application can set the path to the folder containing the OpenCL kernel files by calling CLKernelLibrary::init(). By default the path is set to "./cl_kernels".
+
+@b set_soname: Do you want to build the versioned version of the library ?
+
+If enabled the library will contain a SONAME and SHLIBVERSION and some symlinks will automatically be created between the objects.
+Example:
+ libarm_compute_core.so -> libarm_compute_core.so.1.0.0
+ libarm_compute_core.so.1 -> libarm_compute_core.so.1.0.0
+ libarm_compute_core.so.1.0.0
+
+@note This option is disabled by default as it requires SCons version 2.4 or above.
+
+@b extra_cxx_flags: Custom CXX flags which will be appended to the end of the build command.
+
+@b build_dir: Build the library in a subfolder of the "build" folder. (Allows to build several configurations in parallel).
+
+@b examples: Whether or not to build the examples
+
+@b validation_tests: Enable the build of the validation suite.
+
+@b benchmark_tests: Enable the build of the benchmark tests
+
+@b pmu: Enable the PMU cycle counter to measure execution time in benchmark tests. (Your device needs to support it)
+
+@b mali: Enable the collection of Arm® Mali™ hardware counters to measure execution time in benchmark tests. (Your device needs to have a Arm® Mali™ driver that supports it)
+
+@b openmp Build in the OpenMP scheduler for Neon.
+
+@note Only works when building with g++ not clang++
+
+@b cppthreads Build in the C++11 scheduler for Neon.
+
+@sa Scheduler::set
+
+@b external_tests_dir Add examples, benchmarks and tests to the tests suite from an external path ( /path/to/external_tests_dir )
+
+In order to use this option, the external tests directory must have the following structure:
+
+ EXTERNAL_TESTS_DIR:
+ └── tests
+ ├── benchmark
+ │   ├── CL
+ │   ├── datasets
+ │   ├── fixtures
+ │   └── Neon
+ └── validation
+    ├── CL
+     ├── datasets
+     ├── fixtures
+     └── Neon
+
+Then, build the library with `external_tests_dir=<PATH_TO_EXTERNAL_TESTS_DIR>`.
+
+@section S1_2_linux Building for Linux
+
+@subsection S1_2_1_library How to build the library ?
+
+For Linux, the library was successfully built and tested using the following Linaro GCC toolchain:
+
+ - gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf
+ - gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu
+
+To cross-compile the library in debug mode, with Arm® Neon™ only support, for Linux 32bit:
+
+ scons Werror=1 -j8 debug=1 neon=1 opencl=0 os=linux arch=armv7a
+
+To cross-compile the library in asserts mode, with OpenCL only support, for Linux 64bit:
+
+ scons Werror=1 -j8 debug=0 asserts=1 neon=0 opencl=1 embed_kernels=1 os=linux arch=arm64-v8a
+
+You can also compile the library natively on an Arm device by using <b>build=native</b>:
+
+ scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=arm64-v8a build=native
+ scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=armv7a build=native
+
+@note g++ for Arm is mono-arch, therefore if you want to compile for Linux 32bit on a Linux 64bit platform you will have to use a cross compiler.
+
+For example on a 64bit Debian based system you would have to install <b>g++-arm-linux-gnueabihf</b>
+
+ apt-get install g++-arm-linux-gnueabihf
+
+Then run
+
+ scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=armv7a build=cross_compile
+
+or simply remove the build parameter as build=cross_compile is the default value:
+
+ scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=armv7a
+
+@subsection S1_2_2_examples How to manually build the examples ?
+
+The examples get automatically built by scons as part of the build process of the library described above. This section just describes how you can build and link your own application against our library.
+
+@note The following command lines assume the arm_compute libraries are present in the current directory or in the system library path. If this is not the case you can specify the location of the pre-built libraries with the compiler option -L. When building the OpenCL example the commands below assume that the CL headers are located in the include folder where the command is executed.
+
+To cross compile a Arm® Neon™ example for Linux 32bit:
+
+ arm-linux-gnueabihf-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute -larm_compute_core -o neon_convolution
+
+To cross compile a Arm® Neon™ example for Linux 64bit:
+
+ aarch64-linux-gnu-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -L. -larm_compute -larm_compute_core -o neon_convolution
+
+(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
+
+To cross compile an OpenCL example for Linux 32bit:
+
+ arm-linux-gnueabihf-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
+
+To cross compile an OpenCL example for Linux 64bit:
+
+ aarch64-linux-gnu-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
+
+(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
+
+To cross compile the examples with the Graph API, such as graph_lenet.cpp, you need to link the examples against arm_compute_graph.so too.
+
+i.e. to cross compile the "graph_lenet" example for Linux 32bit:
+
+ arm-linux-gnueabihf-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+
+i.e. to cross compile the "graph_lenet" example for Linux 64bit:
+
+ aarch64-linux-gnu-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+
+(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
+
+@note If compiling using static libraries, this order must be followed when linking: arm_compute_graph_static, arm_compute, arm_compute_core
+
+To compile natively (i.e directly on an Arm device) for Arm® Neon™ for Linux 32bit:
+
+ g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -larm_compute -larm_compute_core -o neon_convolution
+
+To compile natively (i.e directly on an Arm device) for Arm® Neon™ for Linux 64bit:
+
+ g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute -larm_compute_core -o neon_convolution
+
+(notice the only difference with the 32 bit command is that we don't need the -mfpu option)
+
+To compile natively (i.e directly on an Arm device) for OpenCL for Linux 32bit or Linux 64bit:
+
+ g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
+
+To compile natively the examples with the Graph API, such as graph_lenet.cpp, you need to link the examples against arm_compute_graph.so too.
+
+i.e. to natively compile the "graph_lenet" example for Linux 32bit:
+
+ g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+
+i.e. to natively compile the "graph_lenet" example for Linux 64bit:
+
+ g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+
+(notice the only difference with the 32 bit command is that we don't need the -mfpu option)
+
+@note If compiling using static libraries, this order must be followed when linking: arm_compute_graph_static, arm_compute, arm_compute_core
+
+@note These two commands assume libarm_compute.so is available in your library path, if not add the path to it using -L (e.g. -Llib/linux-arm64-v8a-neon-cl-asserts/)
+@note You might need to export the path to OpenCL library as well in your LD_LIBRARY_PATH if Compute Library was built with OpenCL enabled.
+
+To run the built executable simply run:
+
+ LD_LIBRARY_PATH=build ./neon_convolution
+
+or
+
+ LD_LIBRARY_PATH=build ./cl_convolution
+
+@note Examples accept different types of arguments, to find out what they are run the example with \a --help as an argument. If no arguments are specified then random values will be used to execute the graph.
+
+For example:
+
+ LD_LIBRARY_PATH=. ./graph_lenet --help
+
+Below is a list of the common parameters among the graph examples :
+@snippet utils/CommonGraphOptions.h Common graph examples parameters
+
+@subsection S1_2_3_sve Build for SVE or SVE2
+
+In order to build for SVE or SVE2 you need a compiler that supports them. You can find more information in the following links:
+ -# GCC: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/sve-support
+ -# LLVM: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/llvm-toolchain/sve-support
+
+@note You then need to indicate the toolchains using the scons "toolchain_prefix" parameter.
+
+An example build command with SVE is:
+
+ scons arch=arm64-v8.2-a-sve os=linux build_dir=arm64 -j55 standalone=0 opencl=0 openmp=0 validation_tests=1 neon=1 cppthreads=1 toolchain_prefix=aarch64-none-linux-gnu-
+
+@section S1_3_android Building for Android
+
+For Android, the library was successfully built and tested using Google's standalone toolchains:
+ - clang++ from NDK r18b for armv7a
+ - clang++ from NDK r20b for arm64-v8a
+ - clang++ from NDK r20b for arm64-v8.2-a with FP16 support
+
+For NDK r18 or older, here is a guide to <a href="https://developer.android.com/ndk/guides/standalone_toolchain.html">create your Android standalone toolchains from the NDK</a>:
+- Download the NDK r18b from here: https://developer.android.com/ndk/downloads/index.html to directory $NDK
+- Make sure you have Python 2.7 installed on your machine.
+- Generate the 32 and/or 64 toolchains by running the following commands to your toolchain directory $MY_TOOLCHAINS:
+
+ $NDK/build/tools/make_standalone_toolchain.py --arch arm64 --install-dir $MY_TOOLCHAINS/aarch64-linux-android-ndk-r18b --stl libc++ --api 21
+ $NDK/build/tools/make_standalone_toolchain.py --arch arm --install-dir $MY_TOOLCHAINS/arm-linux-android-ndk-r18b --stl libc++ --api 21
+
+For NDK r19 or newer, you can directly <a href="https://developer.android.com/ndk/downloads">Download</a> the NDK package for your development platform, without the need to launch the make_standalone_toolchain.py script. You can find all the prebuilt binaries inside $NDK/toolchains/llvm/prebuilt/$OS_ARCH/bin/.
+@attention the building script will look for a binary named "aarch64-linux-android-clang++", while the prebuilt binaries will have their API version as a suffix to their filename (e.g. "aarch64-linux-android21-clang++"). You should copy/rename the binary removing this suffix, or - alternatively - create an alias for it.
+
+@attention We used to use gnustl but as of NDK r17 it is deprecated so we switched to libc++
+
+@note Make sure to add the toolchains to your PATH:
+
+ export PATH=$PATH:$MY_TOOLCHAINS/aarch64-linux-android-ndk-r18b/bin:$MY_TOOLCHAINS/arm-linux-android-ndk-r18b/bin
+
+@subsection S1_3_1_library How to build the library ?
+
+To cross-compile the library in debug mode, with Arm® Neon™ only support, for Android 32bit:
+
+ CXX=clang++ CC=clang scons Werror=1 -j8 debug=1 neon=1 opencl=0 os=android arch=armv7a
+
+To cross-compile the library in asserts mode, with OpenCL only support, for Android 64bit:
+
+ CXX=clang++ CC=clang scons Werror=1 -j8 debug=0 asserts=1 neon=0 opencl=1 embed_kernels=1 os=android arch=arm64-v8a
+
+@subsection S1_3_2_examples How to manually build the examples ?
+
+The examples get automatically built by scons as part of the build process of the library described above. This section just describes how you can build and link your own application against our library.
+
+@note The following command lines assume the arm_compute libraries are present in the current directory or in the system library path. If this is not the case you can specify the location of the pre-built libraries with the compiler option -L. When building the OpenCL example the commands below assume that the CL headers are located in the include folder where the command is executed.
+
+Once you've got your Android standalone toolchain built and added to your path you can do the following:
+
+To cross compile a Arm® Neon™ example:
+
+ #32 bit:
+ arm-linux-androideabi-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_arm -static-libstdc++ -pie
+ #64 bit:
+ aarch64-linux-android-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_aarch64 -static-libstdc++ -pie
+
+To cross compile an OpenCL example:
+
+ #32 bit:
+ arm-linux-androideabi-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
+ #64 bit:
+ aarch64-linux-android-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
+
+To cross compile the examples with the Graph API, such as graph_lenet.cpp, you need to link the library arm_compute_graph also.
+
+ #32 bit:
+ arm-linux-androideabi-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
+ #64 bit:
+ aarch64-linux-android-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
+
+@note Due to some issues in older versions of the Arm® Mali™ OpenCL DDK (<= r13p0), we recommend to link arm_compute statically on Android.
+@note When linked statically the arm_compute_graph library currently needs the --whole-archive linker flag in order to work properly
+
+Then all you need to do is upload the executable and the shared library to the device using ADB:
+
+ adb push neon_convolution_arm /data/local/tmp/
+ adb push cl_convolution_arm /data/local/tmp/
+ adb push gc_absdiff_arm /data/local/tmp/
+ adb shell chmod 777 -R /data/local/tmp/
+
+And finally to run the example:
+
+ adb shell /data/local/tmp/neon_convolution_arm
+ adb shell /data/local/tmp/cl_convolution_arm
+ adb shell /data/local/tmp/gc_absdiff_arm
+
+For 64bit:
+
+ adb push neon_convolution_aarch64 /data/local/tmp/
+ adb push cl_convolution_aarch64 /data/local/tmp/
+ adb push gc_absdiff_aarch64 /data/local/tmp/
+ adb shell chmod 777 -R /data/local/tmp/
+
+And finally to run the example:
+
+ adb shell /data/local/tmp/neon_convolution_aarch64
+ adb shell /data/local/tmp/cl_convolution_aarch64
+ adb shell /data/local/tmp/gc_absdiff_aarch64
+
+@note Examples accept different types of arguments, to find out what they are run the example with \a --help as an argument. If no arguments are specified then random values will be used to execute the graph.
+
+For example:
+ adb shell /data/local/tmp/graph_lenet --help
+
+In this case the first argument of LeNet (like all the graph examples) is the target (i.e 0 to run on Neon, 1 to run on OpenCL if available, 2 to run on OpenCL using the CLTuner), the second argument is the path to the folder containing the npy files for the weights and finally the third argument is the number of batches to run.
+
+@section S1_4_macos Building for macOS
+
+The library was successfully natively built for Apple Silicon under macOS 11.1 using clang v12.0.0.
+
+To natively compile the library with accelerated CPU support:
+
+ scons Werror=1 -j8 neon=1 opencl=0 os=macos arch=arm64-v8a build=native
+
+@note Initial support disables feature discovery through HWCAPS and thread scheduling affinity controls
+
+@section S1_5_bare_metal Building for bare metal
+
+For bare metal, the library was successfully built using linaro's latest (gcc-linaro-6.3.1-2017.05) bare metal toolchains:
+ - arm-eabi for armv7a
+ - aarch64-elf for arm64-v8a
+
+Download linaro for <a href="https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/arm-eabi/">armv7a</a> and <a href="https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/aarch64-elf/">arm64-v8a</a>.
+
+@note Make sure to add the toolchains to your PATH: export PATH=$PATH:$MY_TOOLCHAINS/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-elf/bin:$MY_TOOLCHAINS/gcc-linaro-6.3.1-2017.05-x86_64_arm-eabi/bin
+
+@subsection S1_5_1_library How to build the library ?
+
+To cross-compile the library with Arm® Neon™ support for baremetal arm64-v8a:
+
+ scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=bare_metal arch=arm64-v8a build=cross_compile cppthreads=0 openmp=0 standalone=1
+
+@subsection S1_5_2_examples How to manually build the examples ?
+
+Examples are disabled when building for bare metal. If you want to build the examples you need to provide a custom bootcode depending on the target architecture and link against the compute library. More information about bare metal bootcode can be found <a href="http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0527a/index.html">here</a>.
+
+@section S1_6_windows_host Building on a Windows host system
+
+Using `scons` directly from the Windows command line is known to cause
+problems. The reason seems to be that if `scons` is setup for cross-compilation
+it gets confused about Windows style paths (using backslashes). Thus it is
+recommended to follow one of the options outlined below.
+
+@subsection S1_6_1_ubuntu_on_windows Bash on Ubuntu on Windows
+
+The best and easiest option is to use
+<a href="https://msdn.microsoft.com/en-gb/commandline/wsl/about">Ubuntu on Windows</a>.
+This feature is still marked as *beta* and thus might not be available.
+However, if it is building the library is as simple as opening a *Bash on
+Ubuntu on Windows* shell and following the general guidelines given above.
+
+@subsection S1_6_2_cygwin Cygwin
+
+If the Windows subsystem for Linux is not available <a href="https://www.cygwin.com/">Cygwin</a>
+can be used to install and run `scons`, the minimum Cygwin version must be 3.0.7 or later. In addition
+to the default packages installed by Cygwin `scons` has to be selected in the installer. (`git` might
+also be useful but is not strictly required if you already have got the source
+code of the library.) Linaro provides pre-built versions of
+<a href="http://releases.linaro.org/components/toolchain/binaries/">GCC cross-compilers</a>
+that can be used from the Cygwin terminal. When building for Android the
+compiler is included in the Android standalone toolchain. After everything has
+been set up in the Cygwin terminal the general guide on building the library
+can be followed.
+
+@section S1_7_cl_requirements OpenCL DDK Requirements
+
+@subsection S1_7_1_cl_hard_requirements Hard Requirements
+
+Compute Library requires OpenCL 1.1 and above with support of non uniform workgroup sizes, which is officially supported in the Arm® Mali™ OpenCL DDK r8p0 and above as an extension (respective extension flag is \a -cl-arm-non-uniform-work-group-size).
+
+Enabling 16-bit floating point calculations require \a cl_khr_fp16 extension to be supported. All Arm® Mali™ GPUs with compute capabilities have native support for half precision floating points.
+
+@subsection S1_7_2_cl_performance_requirements Performance improvements
+
+Integer dot product built-in function extensions (and therefore optimized kernels) are available with Arm® Mali™ OpenCL DDK r22p0 and above for the following GPUs : G71, G76. The relevant extensions are \a cl_arm_integer_dot_product_int8, \a cl_arm_integer_dot_product_accumulate_int8 and \a cl_arm_integer_dot_product_accumulate_int16.
+
+OpenCL kernel level debugging can be simplified with the use of printf, this requires the \a cl_arm_printf extension to be supported.
+
+SVM allocations are supported for all the underlying allocations in Compute Library. To enable this OpenCL 2.0 and above is a requirement.
+
+*/
+} // namespace arm_compute
diff --git a/docs/user_guide/introduction.dox b/docs/user_guide/introduction.dox
new file mode 100644
index 0000000000..a956d7dd52
--- /dev/null
+++ b/docs/user_guide/introduction.dox
@@ -0,0 +1,74 @@
+///
+/// Copyright (c) 2017-2021 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+namespace arm_compute
+{
+/** @page introduction Introduction
+
+@tableofcontents
+
+The Compute Library is a collection of low-level machine learning functions optimized for both Arm CPUs and GPUs using SIMD technologies.
+
+Several builds of the library are available using various configurations:
+ - OS: Linux, Android, macOS or bare metal.
+ - Architecture: armv7a (32bit) or arm64-v8a (64bit).
+ - Technology: Arm® Neon™ / OpenCL / Arm® Neon™ and OpenCL.
+ - Debug / Asserts / Release: Use a build with asserts enabled to debug your application and enable extra validation. Once you are sure your application works as expected you can switch to a release build of the library for maximum performance.
+
+@section S0_1_contact Contact / Support
+
+Please create an issue on <a href="https://github.com/ARM-software/ComputeLibrary/issues">Github</a>.
+
+In order to facilitate the work of the support team please provide the build information of the library you are using. To get the version of the library you are using simply run:
+
+ $ strings android-armv7a-cl-asserts/libarm_compute.so | grep arm_compute_version
+ arm_compute_version=v16.12 Build options: {'embed_kernels': '1', 'opencl': '1', 'arch': 'armv7a', 'neon': '0', 'asserts': '1', 'debug': '0', 'os': 'android', 'Werror': '1'} Git hash=f51a545d4ea12a9059fe4e598a092f1fd06dc858
+
+@section S0_2_prebuilt_binaries Pre-built binaries
+
+For each release we provide some pre-built binaries of the library [here](https://github.com/ARM-software/ComputeLibrary/releases)
+
+These binaries have been built using the following toolchains:
+ - Linux armv7a: gcc-linaro-7.2.1-2017.11-x86_64_arm-linux-gnueabihf
+ - Linux arm64-v8a: gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu
+ - Android armv7a: clang++ / libc++ NDK r18b
+ - Android arm64-v8a: clang++ / libc++ NDK r20b
+
+@warning Make sure to use a compatible toolchain to build your application or you will get some std::bad_alloc errors at runtime.
+
+@section S0_3_file_organisation File organisation
+
+This archive contains:
+ - The arm_compute header and source files
+ - The latest Khronos OpenCL 1.2 C headers from the <a href="https://www.khronos.org/registry/cl/">Khronos OpenCL registry</a>
+ - The latest Khronos cl2.hpp from the <a href="https://www.khronos.org/registry/cl/">Khronos OpenCL registry</a> (API version 2.1 when this document was written)
+ - The latest Khronos EGL 1.5 C headers from the <a href="https://www.khronos.org/registry/gles/">Khronos EGL registry</a>
+ - The sources for a stub version of libOpenCL.so, libGLESv1_CM.so, libGLESv2.so and libEGL.so to help you build your application.
+ - An examples folder containing a few examples to compile and link against the library.
+ - A @ref utils folder containing headers with some boiler plate code used by the examples.
+ - This documentation.
+
+ For detailed information about file organization, please refer to Files -> File List section of this documentation.
+
+*/
+} // namespace arm_compute
diff --git a/docs/01_library.dox b/docs/user_guide/library.dox
index 25535d111a..2e3cc967ea 100644
--- a/docs/01_library.dox
+++ b/docs/user_guide/library.dox
@@ -24,7 +24,7 @@
namespace arm_compute
{
/**
-@page architecture Library architecture
+@page architecture Library Architecture
@tableofcontents
@@ -43,22 +43,6 @@ The Runtime library is a very basic wrapper around the Core library which can be
For maximum performance, it is expected that the users would re-implement an equivalent to the runtime library which suits better their needs (With a more clever multi-threading strategy, load-balancing between Arm® Neon™ and OpenCL, etc.)
-@section S4_1_2 Data-type and Data-layout support
-
-Compute Library supports a wide list of data-types, information can been directly found in the documentation of each kernel/function.
-The main data-types that the Machine Learning functions support are the following:
-- BFLOAT16: 16-bit non-standard brain floating point
-- F16: 16-bit half precision floating point
-- F32: 32-bit single precision floating point
-- QASYMM8: 8-bit unsigned asymmetric quantized
-- QASYMM8_SIGNED: 8-bit signed asymmetric quantized
-- QSYMM8_PER_CHANNEL: 8-bit signed symmetric quantized (Used for the weights)
-
-Moreover, Compute Library supports the following data layouts (fast changing dimension from right to left):
-- NHWC: The native layout of Compute Library that delivers the best performance where channels are in the fastest changing dimension
-- NCHW: Legacy layout where width is in the fastest changing dimension
-where N = batches, C = channels, H = height, W = width
-
@section S4_1_3 Fast-math support
Compute Library supports different types of convolution methods, fast-math flag is only used for the Winograd algorithm.
@@ -76,159 +60,6 @@ Although the library supports multi-threading during workload dispatch, thus par
This lies to the fact that the provided scheduling mechanism wasn't designed with thread-safety in mind.
As it is true with the rest of the runtime library a custom scheduling mechanism can be re-implemented to account for thread-safety if needed and be injected as the library's default scheduler.
-@section S4_2_windows_kernels_mt_functions Windows, kernels, multi-threading and functions
-
-@subsection S4_2_1_windows Windows
-
-A @ref Window represents a workload to execute, it can handle up to @ref Coordinates::num_max_dimensions dimensions.
-Each dimension is defined by a start, end and step.
-
-It can split into subwindows as long as *all* the following rules remain true for all the dimensions:
-
-- max[n].start() <= sub[n].start() < max[n].end()
-- sub[n].start() < sub[n].end() <= max[n].end()
-- max[n].step() == sub[n].step()
-- (sub[n].start() - max[n].start()) % max[n].step() == 0
-- (sub[n].end() - sub[n].start()) % max[n].step() == 0
-
-@subsection S4_2_2 Kernels
-
-Each implementation of the @ref IKernel interface (base class of all the kernels in the core library) works in the same way:
-
-OpenCL kernels:
-
-@code{.cpp}
-// Initialize the CLScheduler with the default context and default command queue
-// Implicitly initializes the CLKernelLibrary to use ./cl_kernels as location for OpenCL kernels files and sets a default device for which OpenCL programs are built.
-CLScheduler::get().default_init();
-
-cl::CommandQueue q = CLScheduler::get().queue();
-//Create a kernel object:
-MyKernel kernel;
-// Initialize the kernel with the input/output and options you want to use:
-kernel.configure( input, output, option0, option1);
-// Retrieve the execution window of the kernel:
-const Window& max_window = kernel.window();
-// Run the whole kernel in the current thread:
-kernel.run( q, max_window ); // Enqueue the kernel to process the full window on the default queue
-
-// Wait for the processing to complete:
-q.finish();
-@endcode
-
-Neon / CPP kernels:
-
-@code{.cpp}
-//Create a kernel object:
-MyKernel kernel;
-// Initialize the kernel with the input/output and options you want to use:
-kernel.configure( input, output, option0, option1);
-// Retrieve the execution window of the kernel:
-const Window& max_window = kernel.window();
-// Run the whole kernel in the current thread:
-kernel.run( max_window ); // Run the kernel on the full window
-@endcode
-
-@subsection S4_2_3 Multi-threading
-
-The previous section shows how to run a Arm® Neon™ / CPP kernel in the current thread, however if your system has several CPU cores, you will probably want the kernel to use several cores. Here is how this can be done:
-
-@code{.cpp}
- ThreadInfo info;
- info.cpu_info = &_cpu_info;
-
- const Window &max_window = kernel->window();
- const unsigned int num_iterations = max_window.num_iterations(split_dimension);
- info.num_threads = std::min(num_iterations, _num_threads);
-
- if(num_iterations == 0)
- {
- return;
- }
-
- if(!kernel->is_parallelisable() || info.num_threads == 1)
- {
- kernel->run(max_window, info);
- }
- else
- {
- int t = 0;
- auto thread_it = _threads.begin();
-
- for(; t < info.num_threads - 1; ++t, ++thread_it)
- {
- Window win = max_window.split_window(split_dimension, t, info.num_threads);
- info.thread_id = t;
- thread_it->start(kernel, win, info);
- }
-
- // Run last part on main thread
- Window win = max_window.split_window(split_dimension, t, info.num_threads);
- info.thread_id = t;
- kernel->run(win, info);
-
- try
- {
- for(auto &thread : _threads)
- {
- thread.wait();
- }
- }
- catch(const std::system_error &e)
- {
- std::cerr << "Caught system_error with code " << e.code() << " meaning " << e.what() << '\n';
- }
- }
-@endcode
-
-This is a very basic implementation which was originally used in the Arm® Neon™ runtime library by all the Arm® Neon™ functions.
-
-@sa CPPScheduler
-
-@note Some kernels need some local temporary buffer to perform their calculations. In order to avoid memory corruption between threads, the local buffer must be of size: ```memory_needed_per_thread * num_threads``` and a unique thread_id between 0 and num_threads must be assigned to the @ref ThreadInfo object passed to the ```run``` function.
-
-@subsection S4_2_4 Functions
-
-Functions will automatically allocate the temporary buffers mentioned above, and will automatically multi-thread kernels' executions using the very basic scheduler described in the previous section.
-
-Simple functions only call a single kernel (e.g NEConvolution3x3), while more complex ones consist of several kernels pipelined together (e.g @ref NEFullyConnectedLayer ). Check their documentation to find out which kernels are used by each function.
-
-@code{.cpp}
-//Create a function object:
-MyFunction function;
-// Initialize the function with the input/output and options you want to use:
-function.configure( input, output, option0, option1);
-// Execute the function:
-function.run();
-@endcode
-
-@warning The Compute Library requires Arm® Mali™ OpenCL DDK r8p0 or higher (OpenCL kernels are compiled using the -cl-arm-non-uniform-work-group-size flag)
-
-@note All OpenCL functions and objects in the runtime library use the command queue associated with CLScheduler for all operations, a real implementation would be expected to use different queues for mapping operations and kernels in order to reach a better GPU utilization.
-
-@subsection S4_4_1_cl_scheduler OpenCL Scheduler and kernel library
-
-The Compute Library runtime uses a single command queue and context for all the operations.
-
-The user can get / set this context and command queue through CLScheduler's interface.
-
-The user can get / set the target GPU device through the CLScheduler's interface.
-
-@attention Make sure the application is using the same context as the library as in OpenCL it is forbidden to share objects across contexts. This is done by calling @ref CLScheduler::init() or @ref CLScheduler::default_init() at the beginning of your application.
-
-@attention Make sure the scheduler's target is not changed after function classes are created.
-
-All OpenCL kernels used by the library are built and stored in @ref CLKernelLibrary.
-If the library is compiled with embed_kernels=0 the application can set the path to the OpenCL kernels by calling @ref CLKernelLibrary::init(), by default the path is set to "./cl_kernels"
-
-@subsection S4_4_2_events_sync OpenCL events and synchronization
-
-In order to block until all the jobs in the CLScheduler's command queue are done executing the user can call @ref CLScheduler::sync() or create a sync event using @ref CLScheduler::enqueue_sync_event()
-
-@subsection S4_4_2_cl_neon OpenCL / Arm® Neon™ interoperability
-
-You can mix OpenCL and Arm® Neon™ kernels and functions. However it is the user's responsibility to handle the mapping/unmapping of OpenCL objects.
-
@section S4_5_algorithms Algorithms
All computer vision algorithms in this library have been implemented following the [OpenVX 1.1 specifications](https://www.khronos.org/registry/vx/specs/1.1/html/). Please refer to the Khronos documentation for more information.
diff --git a/docs/user_guide/programming_model.dox b/docs/user_guide/programming_model.dox
new file mode 100644
index 0000000000..7990231ba9
--- /dev/null
+++ b/docs/user_guide/programming_model.dox
@@ -0,0 +1,70 @@
+///
+/// Copyright (c) 2017-2021 Arm Limited.
+///
+/// SPDX-License-Identifier: MIT
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy
+/// of this software and associated documentation files (the "Software"), to
+/// deal in the Software without restriction, including without limitation the
+/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+/// sell copies of the Software, and to permit persons to whom the Software is
+/// furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all
+/// copies or substantial portions of the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+/// SOFTWARE.
+///
+namespace arm_compute
+{
+/**
+@page programming_model Programming Model
+
+@tableofcontents
+
+@section programming_model_functions Functions
+
+Functions will automatically allocate any temporary buffers their kernels require, and will automatically multi-thread the kernels' executions using the library's scheduler.
+
+Simple functions only call a single kernel (e.g. NEConvolution3x3), while more complex ones consist of several kernels pipelined together (e.g. @ref NEFullyConnectedLayer ). Check their documentation to find out which kernels are used by each function.
+
+@code{.cpp}
+//Create a function object:
+MyFunction function;
+// Initialize the function with the input/output and options you want to use:
+function.configure( input, output, option0, option1);
+// Execute the function:
+function.run();
+@endcode
+
+@warning The Compute Library requires Arm® Mali™ OpenCL DDK r8p0 or higher (OpenCL kernels are compiled using the -cl-arm-non-uniform-work-group-size flag)
+
+@note All OpenCL functions and objects in the runtime library use the command queue associated with CLScheduler for all operations, a real implementation would be expected to use different queues for mapping operations and kernels in order to reach a better GPU utilization.
+
+@section programming_model_scheduler OpenCL Scheduler
+
+The Compute Library runtime uses a single command queue and context for all the operations.
+
+The user can get / set this context and command queue through CLScheduler's interface.
+
+The user can get / set the target GPU device through the CLScheduler's interface.
+
+@attention Make sure the application is using the same context as the library as in OpenCL it is forbidden to share objects across contexts. This is done by calling @ref CLScheduler::init() or @ref CLScheduler::default_init() at the beginning of your application.
+
+@attention Make sure the scheduler's target is not changed after function classes are created.
+
+@section programming_model_events_sync OpenCL events and synchronization
+
+In order to block until all the jobs in the CLScheduler's command queue are done executing the user can call @ref CLScheduler::sync() or create a sync event using @ref CLScheduler::enqueue_sync_event()
+
+@section programming_model_cl_neon OpenCL / Arm® Neon™ interoperability
+
+You can mix OpenCL and Arm® Neon™ kernels and functions. However it is the user's responsibility to handle the mapping/unmapping of OpenCL objects.
+*/
+} // namespace arm_compute
diff --git a/docs/00_introduction.dox b/docs/user_guide/release_version_and_change_log.dox
index 68533852e6..b9e3b37263 100644
--- a/docs/00_introduction.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -23,56 +23,11 @@
///
namespace arm_compute
{
-/** @mainpage Introduction
+/** @page versions_changelogs Release Versions and Changelog
@tableofcontents
-The Compute Library is a collection of low-level machine learning functions optimized for both Arm CPUs and GPUs using SIMD technologies.
-
-Several builds of the library are available using various configurations:
- - OS: Linux, Android, macOS or bare metal.
- - Architecture: armv7a (32bit) or arm64-v8a (64bit).
- - Technology: Arm® Neon™ / OpenCL / Arm® Neon™ and OpenCL.
- - Debug / Asserts / Release: Use a build with asserts enabled to debug your application and enable extra validation. Once you are sure your application works as expected you can switch to a release build of the library for maximum performance.
-
-@section S0_1_contact Contact / Support
-
-Please create an issue on <a href="https://github.com/ARM-software/ComputeLibrary/issues">Github</a>.
-
-In order to facilitate the work of the support team please provide the build information of the library you are using. To get the version of the library you are using simply run:
-
- $ strings android-armv7a-cl-asserts/libarm_compute.so | grep arm_compute_version
- arm_compute_version=v16.12 Build options: {'embed_kernels': '1', 'opencl': '1', 'arch': 'armv7a', 'neon': '0', 'asserts': '1', 'debug': '0', 'os': 'android', 'Werror': '1'} Git hash=f51a545d4ea12a9059fe4e598a092f1fd06dc858
-
-@section S0_2_prebuilt_binaries Pre-built binaries
-
-For each release we provide some pre-built binaries of the library [here](https://github.com/ARM-software/ComputeLibrary/releases)
-
-These binaries have been built using the following toolchains:
- - Linux armv7a: gcc-linaro-7.2.1-2017.11-x86_64_arm-linux-gnueabihf
- - Linux arm64-v8a: gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu
- - Android armv7a: clang++ / libc++ NDK r18b
- - Android am64-v8a: clang++ / libc++ NDK r20b
-
-@warning Make sure to use a compatible toolchain to build your application or you will get some std::bad_alloc errors at runtime.
-
-@section S1_file_organisation File organisation
-
-This archive contains:
- - The arm_compute header and source files
- - The latest Khronos OpenCL 1.2 C headers from the <a href="https://www.khronos.org/registry/cl/">Khronos OpenCL registry</a>
- - The latest Khronos cl2.hpp from the <a href="https://www.khronos.org/registry/cl/">Khronos OpenCL registry</a> (API version 2.1 when this document was written)
- - The latest Khronos EGL 1.5 C headers from the <a href="https://www.khronos.org/registry/gles/">Khronos EGL registry</a>
- - The sources for a stub version of libOpenCL.so, libGLESv1_CM.so, libGLESv2.so and libEGL.so to help you build your application.
- - An examples folder containing a few examples to compile and link against the library.
- - A @ref utils folder containing headers with some boiler plate code used by the examples.
- - This documentation.
-
- For detailed information about file organization, please refer to Files -> File List section of this documentation.
-
-@section S2_versions_changelog Release versions and changelog
-
-@subsection S2_1_versions Release versions
+@section S2_1_versions Release versions
All releases are numbered vYY.MM Where YY are the last two digits of the year, and MM the month number.
If there is more than one release in a month then an extra sequential number is appended at the end:
@@ -83,7 +38,7 @@ If there is more than one release in a month then an extra sequential number is
@note We're aiming at releasing one major public release with new features per quarter. All releases in between will only contain bug fixes.
-@subsection S2_2_changelog Changelog
+@section S2_2_changelog Changelog
v21.05 Public major release
- Removed computer vision support from Arm® Neon™ backend
@@ -1430,599 +1385,5 @@ v17.02 Sources preview
v16.12 Binary preview release
- Original release
-@section S3_how_to_build How to build the library and the examples
-
-@subsection S3_1_build_options Build options
-
-scons 2.3 or above is required to build the library.
-To see the build options available simply run ```scons -h```:
-
- debug: Debug (yes|no)
- default: False
-
- asserts: Enable asserts (this flag is forced to 1 for debug=1) (yes|no)
- default: False
-
- logging: Logging (this flag is forced to 1 for debug=1) (yes|no)
- default: False
-
- arch: Target Architecture (armv7a|arm64-v8a|arm64-v8.2-a|arm64-v8.2-a-sve|arm64-v8.2-a-sve2|x86_32|x86_64|armv8a|armv8.2-a|armv8.2-a-sve|armv8.6-a|armv8.6-a-sve|armv8.6-a-sve2|armv8r64|x86)
- default: armv7a
-
- estate: Execution State (auto|32|64)
- default: auto
-
- os: Target OS (linux|android|macos|tizen|bare_metal)
- default: linux
-
- build: Build type (native|cross_compile|embed_only)
- default: cross_compile
-
- examples: Build example programs (yes|no)
- default: True
-
- gemm_tuner: Build gemm_tuner programs (yes|no)
- default: True
-
- Werror: Enable/disable the -Werror compilation flag (yes|no)
- default: True
-
- standalone: Builds the tests as standalone executables, links statically with libgcc, libstdc++ and libarm_compute (yes|no)
- default: False
-
- opencl: Enable OpenCL support (yes|no)
- default: True
-
- neon: Enable Arm® Neon™ support (yes|no)
- default: False
-
- embed_kernels: Embed OpenCL kernels in library binary (yes|no)
- default: True
-
- compress_kernels: Compress embedded OpenCL kernels in library binary. Note embed_kernels should be enabled as well (yes|no)
- default: False
-
- set_soname: Set the library's soname and shlibversion (requires SCons 2.4 or above) (yes|no)
- default: False
-
- openmp: Enable OpenMP backend (yes|no)
- default: False
-
- cppthreads: Enable C++11 threads backend (yes|no)
- default: True
-
- build_dir: Specify sub-folder for the build ( /path/to/build_dir )
- default: .
-
- install_dir: Specify sub-folder for the install ( /path/to/install_dir )
- default:
-
- exceptions: Enable/disable C++ exception support (yes|no)
- default: True
-
- linker_script: Use an external linker script ( /path/to/linker_script )
- default:
-
- custom_options: Custom options that can be used to turn on/off features
- (all|none|comma-separated list of names)
- allowed names: disable_mmla_fp
- default: none
-
- data_type_support: Enable a list of data types to support
- (all|none|comma-separated list of names)
- allowed names: qasymm8 qasymm8_signed qsymm16 fp16 fp32
- default: all
-
- toolchain_prefix: Override the toolchain prefix
- default:
-
- compiler_prefix: Override the compiler prefix
- default:
-
- extra_cxx_flags: Extra CXX flags to be appended to the build command
- default:
-
- extra_link_flags: Extra LD flags to be appended to the build command
- default:
-
- compiler_cache: Command to prefix to the C and C++ compiler (e.g ccache)
- default:
-
- specs_file: Specs file to use
- default: rdimon.specs
-
- benchmark_examples: Build benchmark examples programs (yes|no)
- default: False
-
- validate_examples: Build validate examples programs (yes|no)
- default: False
-
- reference_openmp: Build reference validation with openmp (yes|no)
- default: True
-
- validation_tests: Build validation test programs (yes|no)
- default: False
-
- benchmark_tests: Build benchmark test programs (yes|no)
- default: False
-
- test_filter: Pattern to specify the tests' filenames to be compiled
- default: *.cpp
-
- pmu: Enable PMU counters (yes|no)
- default: False
-
- mali: Enable Arm® Mali™ hardware counters (yes|no)
- default: False
-
- external_tests_dir: Add examples, benchmarks and tests to the tests suite from an external path ( /path/to/external_tests_dir )
- default:
-
-@b debug / @b asserts:
- - With debug=1 asserts are enabled, and the library is built with symbols and no optimisations enabled.
- - With debug=0 and asserts=1: Optimisations are enabled and symbols are removed, however all the asserts are still present (This is about 20% slower than the release build)
- - With debug=0 and asserts=0: All optimisations are enable and no validation is performed, if the application misuses the library it is likely to result in a crash. (Only use this mode once you are sure your application is working as expected).
-
-@b arch: The x86_32 and x86_64 targets can only be used with neon=0 and opencl=1.
-
-@b os: Choose the operating system you are targeting: Linux, Android or bare metal.
-@note bare metal can only be used for Arm® Neon™ (not OpenCL), only static libraries get built and Neon's multi-threading support is disabled.
-
-@b build: you can either build directly on your device (native) or cross compile from your desktop machine (cross-compile). In both cases make sure the compiler is available in your path.
-
-@note If you want to natively compile for 32bit on a 64bit Arm device running a 64bit OS then you will have to use cross-compile too.
-
-There is also an 'embed_only' option which will generate all the .embed files for the OpenCL kernels. This might be useful if using a different build system to compile the library.
-
-In addittion the option 'compress_kernels' will compress the embedded OpenCL kernel files using zlib and inject them in the library. This is useful for reducing the binary size. Note, this option is only available for Android when 'embed_kernels' is enabled.
-
-@b Werror: If you are compiling using the same toolchains as the ones used in this guide then there shouldn't be any warning and therefore you should be able to keep Werror=1. If with a different compiler version the library fails to build because of warnings interpreted as errors then, if you are sure the warnings are not important, you might want to try to build with Werror=0 (But please do report the issue on Github).
-
-@b opencl / @b neon: Choose which SIMD technology you want to target. (Neon for Arm Cortex-A CPUs or OpenCL for Arm® Mali™ GPUs)
-
-@b embed_kernels: For OpenCL only: set embed_kernels=1 if you want the OpenCL kernels to be built in the library's binaries instead of being read from separate ".cl" / ".cs" files. If embed_kernels is set to 0 then the application can set the path to the folder containing the OpenCL kernel files by calling CLKernelLibrary::init(). By default the path is set to "./cl_kernels".
-
-@b set_soname: Do you want to build the versioned version of the library ?
-
-If enabled the library will contain a SONAME and SHLIBVERSION and some symlinks will automatically be created between the objects.
-Example:
- libarm_compute_core.so -> libarm_compute_core.so.1.0.0
- libarm_compute_core.so.1 -> libarm_compute_core.so.1.0.0
- libarm_compute_core.so.1.0.0
-
-@note This options is disabled by default as it requires SCons version 2.4 or above.
-
-@b extra_cxx_flags: Custom CXX flags which will be appended to the end of the build command.
-
-@b build_dir: Build the library in a subfolder of the "build" folder. (Allows to build several configurations in parallel).
-
-@b examples: Build or not the examples
-
-@b validation_tests: Enable the build of the validation suite.
-
-@b benchmark_tests: Enable the build of the benchmark tests
-
-@b pmu: Enable the PMU cycle counter to measure execution time in benchmark tests. (Your device needs to support it)
-
-@b mali: Enable the collection of Arm® Mali™ hardware counters to measure execution time in benchmark tests. (Your device needs to have a Arm® Mali™ driver that supports it)
-
-@b openmp Build in the OpenMP scheduler for Neon.
-
-@note Only works when building with g++ not clang++
-
-@b cppthreads Build in the C++11 scheduler for Neon.
-
-@sa Scheduler::set
-
-@b external_tests_dir Add examples, benchmarks and tests to the tests suite from an external path ( /path/to/external_tests_dir )
-
-In order to use this option, the external tests directory must have the following structure:
-
- EXTERNAL_TESTS_DIR:
- └── tests
- ├── benchmark
- │   ├── CL
- │   ├── datasets
- │   ├── fixtures
- │   └── Neon
- └── validation
-    ├── CL
-     ├── datasets
-     ├── fixtures
-     └── Neon
-
-Then, build the library with `external_tests_dir=<PATH_TO_EXTERNAL_TESTS_DIR>`.
-
-@subsection S3_2_linux Building for Linux
-
-@subsubsection S3_2_1_library How to build the library ?
-
-For Linux, the library was successfully built and tested using the following Linaro GCC toolchain:
-
- - gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf
- - gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu
-
-To cross-compile the library in debug mode, with Arm® Neon™ only support, for Linux 32bit:
-
- scons Werror=1 -j8 debug=1 neon=1 opencl=0 os=linux arch=armv7a
-
-To cross-compile the library in asserts mode, with OpenCL only support, for Linux 64bit:
-
- scons Werror=1 -j8 debug=0 asserts=1 neon=0 opencl=1 embed_kernels=1 os=linux arch=arm64-v8a
-
-You can also compile the library natively on an Arm device by using <b>build=native</b>:
-
- scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=arm64-v8a build=native
- scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=armv7a build=native
-
-@note g++ for Arm is mono-arch, therefore if you want to compile for Linux 32bit on a Linux 64bit platform you will have to use a cross compiler.
-
-For example on a 64bit Debian based system you would have to install <b>g++-arm-linux-gnueabihf</b>
-
- apt-get install g++-arm-linux-gnueabihf
-
-Then run
-
- scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=armv7a build=cross_compile
-
-or simply remove the build parameter as build=cross_compile is the default value:
-
- scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux arch=armv7a
-
-@subsubsection S3_2_2_examples How to manually build the examples ?
-
-The examples get automatically built by scons as part of the build process of the library described above. This section just describes how you can build and link your own application against our library.
-
-@note The following command lines assume the arm_compute libraries are present in the current directory or in the system library path. If this is not the case you can specify the location of the pre-built libraries with the compiler option -L. When building the OpenCL example the commands below assume that the CL headers are located in the include folder where the command is executed.
-
-To cross compile a Arm® Neon™ example for Linux 32bit:
-
- arm-linux-gnueabihf-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute -larm_compute_core -o neon_convolution
-
-To cross compile a Arm® Neon™ example for Linux 64bit:
-
- aarch64-linux-gnu-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -L. -larm_compute -larm_compute_core -o neon_convolution
-
-(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
-
-To cross compile an OpenCL example for Linux 32bit:
-
- arm-linux-gnueabihf-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
-
-To cross compile an OpenCL example for Linux 64bit:
-
- aarch64-linux-gnu-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
-
-(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
-
-To cross compile the examples with the Graph API, such as graph_lenet.cpp, you need to link the examples against arm_compute_graph.so too.
-
-i.e. to cross compile the "graph_lenet" example for Linux 32bit:
-
- arm-linux-gnueabihf-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
-
-i.e. to cross compile the "graph_lenet" example for Linux 64bit:
-
- aarch64-linux-gnu-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
-
-(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
-
-@note If compiling using static libraries, this order must be followed when linking: arm_compute_graph_static, arm_compute, arm_compute_core
-
-To compile natively (i.e directly on an Arm device) for Arm® Neon™ for Linux 32bit:
-
- g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -larm_compute -larm_compute_core -o neon_convolution
-
-To compile natively (i.e directly on an Arm device) for Arm® Neon™ for Linux 64bit:
-
- g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute -larm_compute_core -o neon_convolution
-
-(notice the only difference with the 32 bit command is that we don't need the -mfpu option)
-
-To compile natively (i.e directly on an Arm device) for OpenCL for Linux 32bit or Linux 64bit:
-
- g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
-
-To compile natively the examples with the Graph API, such as graph_lenet.cpp, you need to link the examples against arm_compute_graph.so too.
-
-i.e. to natively compile the "graph_lenet" example for Linux 32bit:
-
- g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
-
-i.e. to natively compile the "graph_lenet" example for Linux 64bit:
-
- g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
-
-(notice the only difference with the 32 bit command is that we don't need the -mfpu option)
-
-@note If compiling using static libraries, this order must be followed when linking: arm_compute_graph_static, arm_compute, arm_compute_core
-
-@note These two commands assume libarm_compute.so is available in your library path, if not add the path to it using -L (e.g. -Llib/linux-arm64-v8a-neon-cl-asserts/)
-@note You might need to export the path to OpenCL library as well in your LD_LIBRARY_PATH if Compute Library was built with OpenCL enabled.
-
-To run the built executable simply run:
-
- LD_LIBRARY_PATH=build ./neon_convolution
-
-or
-
- LD_LIBRARY_PATH=build ./cl_convolution
-
-@note Examples accept different types of arguments, to find out what they are run the example with \a --help as an argument. If no arguments are specified then random values will be used to execute the graph.
-
-For example:
-
- LD_LIBRARY_PATH=. ./graph_lenet --help
-
-Below is a list of the common parameters among the graph examples :
-@snippet utils/CommonGraphOptions.h Common graph examples parameters
-
-@subsubsection S3_2_3_sve Build for SVE or SVE2
-
-In order to build for SVE or SVE2 you need a compiler that supports them. You can find more information in the following links:
- -# GCC: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/sve-support
- -# LLVM: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/llvm-toolchain/sve-support
-
-@note You then need to indicate the toolchains using the scons "toolchain_prefix" parameter.
-
-An example build command with SVE is:
-
- scons arch=arm64-v8.2-a-sve os=linux build_dir=arm64 -j55 standalone=0 opencl=0 openmp=0 validation_tests=1 neon=1 cppthreads=1 toolchain_prefix=aarch64-none-linux-gnu-
-
-@subsection S3_3_android Building for Android
-
-For Android, the library was successfully built and tested using Google's standalone toolchains:
- - clang++ from NDK r18b for armv7a
- - clang++ from NDK r20b for arm64-v8a
- - clang++ from NDK r20b for arm64-v8.2-a with FP16 support
-
-For NDK r18 or older, here is a guide to <a href="https://developer.android.com/ndk/guides/standalone_toolchain.html">create your Android standalone toolchains from the NDK</a>:
-- Download the NDK r18b from here: https://developer.android.com/ndk/downloads/index.html to directory $NDK
-- Make sure you have Python 2.7 installed on your machine.
-- Generate the 32 and/or 64 toolchains by running the following commands to your toolchain directory $MY_TOOLCHAINS:
-
- $NDK/build/tools/make_standalone_toolchain.py --arch arm64 --install-dir $MY_TOOLCHAINS/aarch64-linux-android-ndk-r18b --stl libc++ --api 21
- $NDK/build/tools/make_standalone_toolchain.py --arch arm --install-dir $MY_TOOLCHAINS/arm-linux-android-ndk-r18b --stl libc++ --api 21
-
-For NDK r19 or newer, you can directly <a href="https://developer.android.com/ndk/downloads">Download</a> the NDK package for your development platform, without the need to launch the make_standalone_toolchain.py script. You can find all the prebuilt binaries inside $NDK/toolchains/llvm/prebuilt/$OS_ARCH/bin/.
-@attention the building script will look for a binary named "aarch64-linux-android-clang++", while the prebuilt binaries will have their API version as a suffix to their filename (e.g. "aarch64-linux-android21-clang++"). You should copy/rename the binary removing this suffix, or - alternatively - create an alias for it.
-
-@attention We used to use gnustl but as of NDK r17 it is deprecated so we switched to libc++
-
-@note Make sure to add the toolchains to your PATH:
-
- export PATH=$PATH:$MY_TOOLCHAINS/aarch64-linux-android-ndk-r18b/bin:$MY_TOOLCHAINS/arm-linux-android-ndk-r18b/bin
-
-@subsubsection S3_3_1_library How to build the library ?
-
-To cross-compile the library in debug mode, with Arm® Neon™ only support, for Android 32bit:
-
- CXX=clang++ CC=clang scons Werror=1 -j8 debug=1 neon=1 opencl=0 os=android arch=armv7a
-
-To cross-compile the library in asserts mode, with OpenCL only support, for Android 64bit:
-
- CXX=clang++ CC=clang scons Werror=1 -j8 debug=0 asserts=1 neon=0 opencl=1 embed_kernels=1 os=android arch=arm64-v8a
-
-@subsubsection S3_3_2_examples How to manually build the examples ?
-
-The examples get automatically built by scons as part of the build process of the library described above. This section just describes how you can build and link your own application against our library.
-
-@note The following command lines assume the arm_compute libraries are present in the current directory or in the system library path. If this is not the case you can specify the location of the pre-built libraries with the compiler option -L. When building the OpenCL example the commands below assume that the CL headers are located in the include folder where the command is executed.
-
-Once you've got your Android standalone toolchain built and added to your path you can do the following:
-
-To cross compile a Arm® Neon™ example:
-
- #32 bit:
- arm-linux-androideabi-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_arm -static-libstdc++ -pie
- #64 bit:
- aarch64-linux-android-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_aarch64 -static-libstdc++ -pie
-
-To cross compile an OpenCL example:
-
- #32 bit:
- arm-linux-androideabi-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
- #64 bit:
- aarch64-linux-android-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
-
-To cross compile the examples with the Graph API, such as graph_lenet.cpp, you need to link the library arm_compute_graph also.
-
- #32 bit:
- arm-linux-androideabi-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
- #64 bit:
- aarch64-linux-android-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
-
-@note Due to some issues in older versions of the Arm® Mali™ OpenCL DDK (<= r13p0), we recommend to link arm_compute statically on Android.
-@note When linked statically the arm_compute_graph library currently needs the --whole-archive linker flag in order to work properly
-
-Then all you need to do is upload the executable and the shared library to the device using ADB:
-
- adb push neon_convolution_arm /data/local/tmp/
- adb push cl_convolution_arm /data/local/tmp/
- adb push gc_absdiff_arm /data/local/tmp/
- adb shell chmod 777 -R /data/local/tmp/
-
-And finally to run the example:
-
- adb shell /data/local/tmp/neon_convolution_arm
- adb shell /data/local/tmp/cl_convolution_arm
- adb shell /data/local/tmp/gc_absdiff_arm
-
-For 64bit:
-
- adb push neon_convolution_aarch64 /data/local/tmp/
- adb push cl_convolution_aarch64 /data/local/tmp/
- adb push gc_absdiff_aarch64 /data/local/tmp/
- adb shell chmod 777 -R /data/local/tmp/
-
-And finally to run the example:
-
- adb shell /data/local/tmp/neon_convolution_aarch64
- adb shell /data/local/tmp/cl_convolution_aarch64
- adb shell /data/local/tmp/gc_absdiff_aarch64
-
-@note Examples accept different types of arguments, to find out what they are run the example with \a --help as an argument. If no arguments are specified then random values will be used to execute the graph.
-
-For example:
- adb shell /data/local/tmp/graph_lenet --help
-
-In this case the first argument of LeNet (like all the graph examples) is the target (i.e 0 to run on Neon, 1 to run on OpenCL if available, 2 to run on OpenCL using the CLTuner), the second argument is the path to the folder containing the npy files for the weights and finally the third argument is the number of batches to run.
-
-@subsection S3_4_macos Building for macOS
-
-The library was successfully natively built for Apple Silicon under macOS 11.1 using clang v12.0.0.
-
-To natively compile the library with accelerated CPU support:
-
- scons Werror=1 -j8 neon=1 opencl=0 os=macos arch=arm64-v8a build=native
-
-@note Initial support disables feature discovery through HWCAPS and thread scheduling affinity controls
-
-@subsection S3_5_bare_metal Building for bare metal
-
-For bare metal, the library was successfully built using linaro's latest (gcc-linaro-6.3.1-2017.05) bare metal toolchains:
- - arm-eabi for armv7a
- - aarch64-elf for arm64-v8a
-
-Download linaro for <a href="https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/arm-eabi/">armv7a</a> and <a href="https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/aarch64-elf/">arm64-v8a</a>.
-
-@note Make sure to add the toolchains to your PATH: export PATH=$PATH:$MY_TOOLCHAINS/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-elf/bin:$MY_TOOLCHAINS/gcc-linaro-6.3.1-2017.05-x86_64_arm-eabi/bin
-
-@subsubsection S3_5_1_library How to build the library ?
-
-To cross-compile the library with Arm® Neon™ support for baremetal arm64-v8a:
-
- scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=bare_metal arch=arm64-v8a build=cross_compile cppthreads=0 openmp=0 standalone=1
-
-@subsubsection S3_5_2_examples How to manually build the examples ?
-
-Examples are disabled when building for bare metal. If you want to build the examples you need to provide a custom bootcode depending on the target architecture and link against the compute library. More information about bare metal bootcode can be found <a href="http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0527a/index.html">here</a>.
-
-@subsection S3_6_windows_host Building on a Windows host system
-
-Using `scons` directly from the Windows command line is known to cause
-problems. The reason seems to be that if `scons` is setup for cross-compilation
-it gets confused about Windows style paths (using backslashes). Thus it is
-recommended to follow one of the options outlined below.
-
-@subsubsection S3_6_1_ubuntu_on_windows Bash on Ubuntu on Windows
-
-The best and easiest option is to use
-<a href="https://msdn.microsoft.com/en-gb/commandline/wsl/about">Ubuntu on Windows</a>.
-This feature is still marked as *beta* and thus might not be available.
-However, if it is building the library is as simple as opening a *Bash on
-Ubuntu on Windows* shell and following the general guidelines given above.
-
-@subsubsection S3_6_2_cygwin Cygwin
-
-If the Windows subsystem for Linux is not available <a href="https://www.cygwin.com/">Cygwin</a>
-can be used to install and run `scons`, the minimum Cygwin version must be 3.0.7 or later. In addition
-to the default packages installed by Cygwin `scons` has to be selected in the installer. (`git` might
-also be useful but is not strictly required if you already have got the source
-code of the library.) Linaro provides pre-built versions of
-<a href="http://releases.linaro.org/components/toolchain/binaries/">GCC cross-compilers</a>
-that can be used from the Cygwin terminal. When building for Android the
-compiler is included in the Android standalone toolchain. After everything has
-been set up in the Cygwin terminal the general guide on building the library
-can be followed.
-
-@subsection S3_7_cl_requirements OpenCL DDK Requirements
-
-@subsubsection S3_7_1_cl_hard_requirements Hard Requirements
-
-Compute Library requires OpenCL 1.1 and above with support of non uniform workgroup sizes, which is officially supported in the Arm® Mali™ OpenCL DDK r8p0 and above as an extension (respective extension flag is \a -cl-arm-non-uniform-work-group-size).
-
-Enabling 16-bit floating point calculations require \a cl_khr_fp16 extension to be supported. All Arm® Mali™ GPUs with compute capabilities have native support for half precision floating points.
-
-@subsubsection S3_7_2_cl_performance_requirements Performance improvements
-
-Integer dot product built-in function extensions (and therefore optimized kernels) are available with Arm® Mali™ OpenCL DDK r22p0 and above for the following GPUs : G71, G76. The relevant extensions are \a cl_arm_integer_dot_product_int8, \a cl_arm_integer_dot_product_accumulate_int8 and \a cl_arm_integer_dot_product_accumulate_int16.
-
-OpenCL kernel level debugging can be simplified with the use of printf, this requires the \a cl_arm_printf extension to be supported.
-
-SVM allocations are supported for all the underlying allocations in Compute Library. To enable this OpenCL 2.0 and above is a requirement.
-
-@subsection S3_8_cl_tuner OpenCL Tuner
-
-The OpenCL tuner, a.k.a. CLTuner, is a module of Arm Compute Library that can improve the performance of the OpenCL kernels by tuning the Local-Workgroup-Size (LWS).
-The optimal LWS for each unique OpenCL kernel configuration is stored in a table. This table can be either imported or exported from/to a file.
-The OpenCL tuner runs the same OpenCL kernel for a range of local workgroup sizes and keeps the local workgroup size of the fastest run to use in subsequent calls to the kernel. It supports three modes of tuning with different trade-offs between the time taken to tune and the kernel execution time achieved using the best LWS found. In the Exhaustive mode, it searches all the supported values of LWS. This mode takes the longest time to tune and is the most likely to find the optimal LWS. Normal mode searches a subset of LWS values to yield a good approximation of the optimal LWS. It takes less time to tune than Exhaustive mode. Rapid mode takes the shortest time to tune and finds an LWS value that is at least as good or better than the default LWS value. The mode affects only the search for the optimal LWS and has no effect when the LWS value is imported from a file.
-In order for the performance numbers to be meaningful you must disable the GPU power management and set it to a fixed frequency for the entire duration of the tuning phase.
-
-If you wish to know more about LWS and the important role on improving the GPU cache utilization, we suggest having a look at the presentation "Even Faster CNNs: Exploring the New Class of Winograd Algorithms" available at the following link:
-
-https://www.embedded-vision.com/platinum-members/arm/embedded-vision-training/videos/pages/may-2018-embedded-vision-summit-iodice
-
-Tuning a network from scratch can be long and affect considerably the execution time for the first run of your network. It is recommended for this reason to store the CLTuner's result in a file to amortize this time when you either re-use the same network or the functions with the same configurations. The tuning is performed only once for each OpenCL kernel.
-
-CLTuner looks for the optimal LWS for each unique OpenCL kernel configuration. Since a function (i.e. Convolution Layer, Pooling Layer, Fully Connected Layer ...) can be called multiple times but with different parameters, we associate an "id" (called "config_id") to each kernel to distinguish the unique configurations.
-
- #Example: 2 unique Matrix Multiply configurations
-@code{.cpp}
- TensorShape a0 = TensorShape(32,32);
- TensorShape b0 = TensorShape(32,32);
- TensorShape c0 = TensorShape(32,32);
- TensorShape a1 = TensorShape(64,64);
- TensorShape b1 = TensorShape(64,64);
- TensorShape c1 = TensorShape(64,64);
-
- Tensor a0_tensor;
- Tensor b0_tensor;
- Tensor c0_tensor;
- Tensor a1_tensor;
- Tensor b1_tensor;
- Tensor c1_tensor;
-
- a0_tensor.allocator()->init(TensorInfo(a0, 1, DataType::F32));
- b0_tensor.allocator()->init(TensorInfo(b0, 1, DataType::F32));
- c0_tensor.allocator()->init(TensorInfo(c0, 1, DataType::F32));
- a1_tensor.allocator()->init(TensorInfo(a1, 1, DataType::F32));
- b1_tensor.allocator()->init(TensorInfo(b1, 1, DataType::F32));
-    c1_tensor.allocator()->init(TensorInfo(c1, 1, DataType::F32));
-
- CLGEMM gemm0;
- CLGEMM gemm1;
-
- // Configuration 0
- gemm0.configure(&a0, &b0, nullptr, &c0, 1.0f, 0.0f);
-
- // Configuration 1
- gemm1.configure(&a1, &b1, nullptr, &c1, 1.0f, 0.0f);
-@endcode
-
-@subsubsection S3_8_1_cl_tuner_how_to How to use it
-
-All the graph examples in the Compute Library's folder "examples" and the arm_compute_benchmark accept an argument to enable the OpenCL tuner and an argument to export/import the LWS values to/from a file
-
- #Enable CL tuner
-    ./graph_mobilenet --enable-tuner --target=CL
- ./arm_compute_benchmark --enable-tuner
-
- #Export/Import to/from a file
- ./graph_mobilenet --enable-tuner --target=CL --tuner-file=acl_tuner.csv
- ./arm_compute_benchmark --enable-tuner --tuner-file=acl_tuner.csv
-
-If you are importing the CLTuner's results from a file, the new tuned LWS values will be appended to it.
-
-Whether you are benchmarking the graph examples or the test cases in the arm_compute_benchmark, remember to:
-
- -# Disable the power management
- -# Keep the GPU frequency constant
-    -# Run the network multiple times (i.e. 10).
-
-If you are not using the graph API or the benchmark infrastructure you will need to manually pass a CLTuner object to CLScheduler before configuring any function.
-
-@code{.cpp}
-CLTuner tuner;
-
-// Setup Scheduler
-CLScheduler::get().default_init(&tuner);
-@endcode
-
-After the first run, the CLTuner's results can be exported to a file using the method "save_to_file()".
-- tuner.save_to_file("results.csv");
-
-This file can be also imported using the method "load_from_file("results.csv")".
-- tuner.load_from_file("results.csv");
-*/
-} // namespace arm_compute
+ */
+} // namespace arm_compute \ No newline at end of file
diff --git a/docs/02_tests.dox b/docs/user_guide/tests.dox
index 70d2f3d67b..0d166b9693 100644
--- a/docs/02_tests.dox
+++ b/docs/user_guide/tests.dox
@@ -26,7 +26,7 @@ namespace arm_compute
namespace test
{
/**
-@page tests Validation and benchmarks tests
+@page tests Validation and Benchmarks
@tableofcontents