path: root/src/runtime/CL/tuners
author    Georgios Pinitas <georgios.pinitas@arm.com>    2018-06-04 19:27:13 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:52:54 +0000
commit    17812ba9f7cf2c8f5121c11760ac45fbbdb7aeaf (patch)
tree      28c7bb65a8306e82de91a644fdcc1c0947c6f6d7 /src/runtime/CL/tuners
parent    f8d8f3aff04faf731f20411ecb91027eab4365c5 (diff)
download  ComputeLibrary-17812ba9f7cf2c8f5121c11760ac45fbbdb7aeaf.tar.gz
COMPMID-817: Tuner: Port kernels to new design.
Change-Id: Iaabb1153c2abe0400ec79d51a21347debe92d642
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/134062
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
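For context, a minimal sketch of how an application would plug one of these tuners in. This is not part of the patch: the setup is illustrative and assumes the CLScheduler::default_init(ICLTuner *) entry point the library exposes.

```cpp
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"

int main()
{
    // Static tuner carrying the hand-picked LWS hints from this patch.
    arm_compute::tuners::BifrostTuner tuner;

    // Hand the tuner to the scheduler; kernels configured afterwards
    // get tune_kernel_static() applied to their LWS hint.
    arm_compute::CLScheduler::get().default_init(&tuner);

    // ... configure and run CL functions as usual ...
    return 0;
}
```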
Diffstat (limited to 'src/runtime/CL/tuners')
-rw-r--r-- src/runtime/CL/tuners/BifrostTuner.cpp | 150
-rw-r--r-- src/runtime/CL/tuners/MidgardTuner.cpp |  77
2 files changed, 226 insertions(+), 1 deletion(-)
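Both files implement the library's ICLTuner interface. The sketch below is reconstructed from the overrides visible in the diff, not copied from the header; consult arm_compute/runtime/CL/ICLTuner.h for the authoritative declaration.

```cpp
// Tuner interface implied by the overrides in this patch (reconstruction).
namespace arm_compute
{
class ICLKernel;

class ICLTuner
{
public:
    virtual ~ICLTuner() = default;
    // One-shot tuning applied when a kernel is configured.
    virtual void tune_kernel_static(ICLKernel &kernel) = 0;
    // Tuning hook invoked at enqueue time; MidgardTuner below
    // leaves it a no-op (ARM_COMPUTE_UNUSED).
    virtual void tune_kernel_dynamic(ICLKernel &kernel) = 0;
};
} // namespace arm_compute
```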
diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp
index c0ebd24afe..edd074ba08 100644
--- a/src/runtime/CL/tuners/BifrostTuner.cpp
+++ b/src/runtime/CL/tuners/BifrostTuner.cpp
@@ -124,15 +124,163 @@ void tune_direct_convolution_kernel(CLDirectConvolutionLayerKernel &k)
k.set_lws_hint(lws_hint);
}
}
+
+void tune_col2im_kernel(CLCol2ImKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+ const GPUTarget gpu_target = k.get_target();
+
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning over 30 representative tensor shapes.
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX))
+ {
+ if((k._convolved_dims.first == 7) || (k._convolved_dims.first == 14))
+ {
+ lws_hint = cl::NDRange(1, 7, 1);
+ }
+ else
+ {
+ lws_hint = cl::NDRange(1, 8, 1);
+ }
+ }
+
+ k.set_lws_hint(lws_hint);
+}
+
+void tune_im2col_kernel(CLIm2ColKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+ const GPUTarget gpu_target = k.get_target();
+
+ // Local work size optimized for the 11x11 AlexNet convolution on Bifrost.
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX) && k._kernel_dims.width == 11)
+ {
+ const bool is_square_kernel = (k._kernel_dims.width == k._kernel_dims.height);
+ if(!is_square_kernel && k._kernel_dims.width > 1 && !k._conv_info.has_padding())
+ {
+ lws_hint = cl::NDRange(1, 1, 1);
+ }
+ }
+ k.set_lws_hint(lws_hint);
+}
+
+void tune_depthwise_im2col_kernel(CLDepthwiseIm2ColKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+ const GPUTarget gpu_target = k.get_target();
+
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX))
+ {
+ lws_hint = cl::NDRange(1, 2, 1);
+ }
+
+ k.set_lws_hint(lws_hint);
+}
+
+void tune_gemv_kernel(CLGEMMMatrixVectorMultiplyKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+ const GPUTarget gpu_target = k.get_target();
+
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX))
+ {
+ lws_hint = cl::NDRange(1, 1, 1);
+ }
+
+ k.set_lws_hint(lws_hint);
+}
+
+void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+ const GPUTarget gpu_target = k.get_target();
+
+ // Configure LWS hint
+ switch(gpu_target)
+ {
+ case GPUTarget::G71:
+ case GPUTarget::G72:
+ case GPUTarget::G51:
+ case GPUTarget::G51BIG:
+ case GPUTarget::G51LIT:
+ case GPUTarget::TNOX:
+ if(k._input1->info()->dimension(1) == 24)
+ {
+ // LWS optimized for the 11x11 AlexNet convolution on Bifrost.
+ lws_hint = cl::NDRange(2, 2);
+ }
+ else if(k._output->info()->dimension(1) == 196)
+ {
+ lws_hint = cl::NDRange(1, 7);
+ }
+ else
+ {
+ lws_hint = cl::NDRange(8, 8);
+ }
+ break;
+ default:
+ lws_hint = cl::NullRange;
+ }
+
+ k.set_lws_hint(lws_hint);
+}
+
+void tune_pooling_kernel(CLPoolingLayerKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+ const GPUTarget gpu_target = k.get_target();
+
+ // Configure the local work size (hint) from the first two dimensions of the global work size.
+ // On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
+ // kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
+ // invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
+ if(k._input->info()->data_layout() == DataLayout::NCHW)
+ {
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX))
+ {
+ cl::NDRange gws = ICLKernel::gws_from_window(k.window());
+ lws_hint = cl::NDRange(gws[0], gws[1], 1);
+ }
+ }
+
+ k.set_lws_hint(lws_hint);
+}
} // namespace
void BifrostTuner::tune_kernel_static(ICLKernel &kernel)
{
- // Continue on tuning if dynamic tuning
if(dynamic_cast<CLDirectConvolutionLayerKernel *>(&kernel) != nullptr)
{
tune_direct_convolution_kernel(*utils::cast::polymorphic_downcast<CLDirectConvolutionLayerKernel *>(&kernel));
}
+ else if(dynamic_cast<CLCol2ImKernel *>(&kernel) != nullptr)
+ {
+ tune_col2im_kernel(*utils::cast::polymorphic_downcast<CLCol2ImKernel *>(&kernel));
+ }
+ else if(dynamic_cast<CLIm2ColKernel *>(&kernel) != nullptr)
+ {
+ tune_im2col_kernel(*utils::cast::polymorphic_downcast<CLIm2ColKernel *>(&kernel));
+ }
+ else if(dynamic_cast<CLDepthwiseIm2ColKernel *>(&kernel) != nullptr)
+ {
+ tune_depthwise_im2col_kernel(*utils::cast::polymorphic_downcast<CLDepthwiseIm2ColKernel *>(&kernel));
+ }
+ else if(dynamic_cast<CLGEMMMatrixVectorMultiplyKernel *>(&kernel) != nullptr)
+ {
+ tune_gemv_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixVectorMultiplyKernel *>(&kernel));
+ }
+ else if(dynamic_cast<CLGEMMMatrixMultiplyKernel *>(&kernel) != nullptr)
+ {
+ tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
+ }
+ else if(dynamic_cast<CLPoolingLayerKernel *>(&kernel) != nullptr)
+ {
+ tune_pooling_kernel(*utils::cast::polymorphic_downcast<CLPoolingLayerKernel *>(&kernel));
+ }
}
void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel)
diff --git a/src/runtime/CL/tuners/MidgardTuner.cpp b/src/runtime/CL/tuners/MidgardTuner.cpp
new file mode 100644
index 0000000000..2c4b1ac94c
--- /dev/null
+++ b/src/runtime/CL/tuners/MidgardTuner.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/tuners/MIdgardTuner.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernels.h"
+#include "arm_compute/core/utils/misc/Cast.h"
+
+namespace arm_compute
+{
+namespace tuners
+{
+namespace
+{
+void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+ const GPUTarget gpu_target = k.get_target();
+
+ switch(gpu_target)
+ {
+ case GPUTarget::MIDGARD:
+ case GPUTarget::T600:
+ case GPUTarget::T700:
+ case GPUTarget::T800:
+ if(k._output->info()->dimension(1) == 196)
+ {
+ lws_hint = cl::NDRange(1, 7);
+ }
+ else
+ {
+ lws_hint = cl::NDRange(8, 8);
+ }
+ break;
+ default:
+ lws_hint = cl::NullRange;
+ }
+
+ k.set_lws_hint(lws_hint);
+}
+} // namespace
+
+void MidgardTuner::tune_kernel_static(ICLKernel &kernel)
+{
+ if(dynamic_cast<CLGEMMMatrixMultiplyKernel *>(&kernel) != nullptr)
+ {
+ tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
+ }
+}
+
+void MidgardTuner::tune_kernel_dynamic(ICLKernel &kernel)
+{
+ ARM_COMPUTE_UNUSED(kernel);
+}
+} // namespace tuners
+} // namespace arm_compute
\ No newline at end of file