-rw-r--r--  SConscript                                                     |   1
-rw-r--r--  arm_compute/core/CL/CLTypes.h                                  |  13
-rw-r--r--  arm_compute/core/CL/ICLKernel.h                                |   9
-rw-r--r--  arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h  |   4
-rw-r--r--  arm_compute/core/utils/misc/Signal.h                           |  99
-rw-r--r--  arm_compute/runtime/CL/CLScheduler.h                           |  18
-rw-r--r--  arm_compute/runtime/CL/CLTuner.h                               |   3
-rw-r--r--  arm_compute/runtime/CL/ICLTuner.h                              |  16
-rw-r--r--  arm_compute/runtime/CL/tuners/BifrostTuner.h                   |  43
-rw-r--r--  src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp        |  73
-rw-r--r--  src/graph/Graph.cpp                                            |   1
-rw-r--r--  src/runtime/CL/CLScheduler.cpp                                 |   2
-rw-r--r--  src/runtime/CL/CLTuner.cpp                                     |   7
-rw-r--r--  src/runtime/CL/functions/CLDirectConvolutionLayer.cpp         |   5
-rw-r--r--  src/runtime/CL/tuners/BifrostTuner.cpp                         | 143
-rw-r--r--  tests/validation/CL/UNIT/Tuner.cpp                             |  80
16 files changed, 426 insertions(+), 91 deletions(-)
diff --git a/SConscript b/SConscript
index f90ee3e536..5a4b6ce5ed 100644
--- a/SConscript
+++ b/SConscript
@@ -186,6 +186,7 @@ if env['opencl']:
runtime_files += Glob('src/runtime/CL/*.cpp')
runtime_files += Glob('src/runtime/CL/functions/*.cpp')
+ runtime_files += Glob('src/runtime/CL/tuners/*.cpp')
graph2_files += Glob('src/graph2/backends/CL/*.cpp')
diff --git a/arm_compute/core/CL/CLTypes.h b/arm_compute/core/CL/CLTypes.h
index a9d5fdd063..14c31fa396 100644
--- a/arm_compute/core/CL/CLTypes.h
+++ b/arm_compute/core/CL/CLTypes.h
@@ -60,5 +60,16 @@ enum class CLVersion
CL20, /* the OpenCL 2.0 and above */
UNKNOWN /* unkown version */
};
-}
+
+/** OpenCL device options */
+struct CLDeviceOptions
+{
+ std::string name; /**< Device name */
+ std::string extensions; /**< List of supported extensions */
+ std::string ddk_version; /**< DDK version */
+ GPUTarget gpu_target; /**< GPU target architecture/instance */
+ size_t num_cores; /**< Number of cores */
+ size_t cache_size; /**< Cache size */
+};
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_CL_TYPES_H__ */
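The new CLDeviceOptions struct is a plain aggregate, so callers can populate it field by field once the device properties have been queried. A minimal sketch, not part of the patch; all literal values below are illustrative assumptions:

    #include "arm_compute/core/CL/CLTypes.h"

    void describe_assumed_g72_device()
    {
        arm_compute::CLDeviceOptions options{};
        options.name        = "Mali-G72";                  // assumed device name
        options.extensions  = "cl_arm_printf";             // assumed extension list
        options.ddk_version = "r9p0-01rel0";               // assumed DDK version string
        options.gpu_target  = arm_compute::GPUTarget::G72;
        options.num_cores   = 12;                          // assumed shader core count
        options.cache_size  = 512 * 1024;                  // assumed cache size in bytes
    }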
diff --git a/arm_compute/core/CL/ICLKernel.h b/arm_compute/core/CL/ICLKernel.h
index c7d0c2156b..e6700cd61e 100644
--- a/arm_compute/core/CL/ICLKernel.h
+++ b/arm_compute/core/CL/ICLKernel.h
@@ -198,6 +198,15 @@ public:
_lws_hint = lws_hint;
}
+ /** Return the Local-Workgroup-Size hint
+ *
+ * @return Current lws hint
+ */
+ cl::NDRange lws_hint() const
+ {
+ return _lws_hint;
+ }
+
/** Get the configuration ID
*
* @note The configuration ID can be used by the caller to distinguish different calls of the same OpenCL kernel
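With the existing setter and the new getter, a tuner can read a kernel's current LWS hint, override it, and restore it if needed. A minimal sketch, assuming kernel refers to an already-configured ICLKernel:

    // Read the current hint via the new getter, then override it with the existing setter
    cl::NDRange previous = kernel.lws_hint();
    kernel.set_lws_hint(cl::NDRange(2, 1, 4));
    // kernel.lws_hint()[0] now returns 2; 'previous' can be written back if the override regresses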
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
index d47b7da213..eb1bf58b1b 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,7 +88,7 @@ public:
void run(const Window &window, cl::CommandQueue &queue) override;
BorderSize border_size() const override;
-private:
+public:
const ICLTensor *_input;
const ICLTensor *_biases;
const ICLTensor *_weights;
diff --git a/arm_compute/core/utils/misc/Signal.h b/arm_compute/core/utils/misc/Signal.h
new file mode 100644
index 0000000000..71f13951b9
--- /dev/null
+++ b/arm_compute/core/utils/misc/Signal.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_MISC_SIGNAL_H__
+#define __ARM_COMPUTE_MISC_SIGNAL_H__
+
+#include <functional>
+
+namespace arm_compute
+{
+namespace utils
+{
+namespace signal
+{
+namespace detail
+{
+/** Base signal class */
+template <typename SignalType>
+class SignalImpl;
+
+/** Signal class function specialization */
+template <typename ReturnType, typename... Args>
+class SignalImpl<ReturnType(Args...)>
+{
+public:
+ using Callback = std::function<ReturnType(Args...)>;
+
+public:
+ /** Default Constructor */
+ SignalImpl() = default;
+
+ /** Connects signal
+ *
+ * @param[in] cb Callback to connect the signal with
+ */
+ void connect(const Callback &cb)
+ {
+ _cb = cb;
+ }
+
+ /** Disconnects the signal */
+ void disconnect()
+ {
+ _cb = nullptr;
+ }
+
+ /** Checks if the signal is connected
+ *
+ * @return True if there is a connection else false
+ */
+ bool connected() const
+ {
+ return (_cb != nullptr);
+ }
+
+ /** Calls the connected callback
+ *
+ * @param[in] args Callback arguments
+ */
+ void operator()(Args &&... args)
+ {
+ if(_cb)
+ {
+ _cb(std::forward<Args>(args)...);
+ }
+ }
+
+private:
+ Callback _cb{}; /**< Signal callback */
+};
+} // namespace detail
+
+/** Signal alias */
+template <class T>
+using Signal = detail::SignalImpl<T>;
+} // namespace signal
+} // namespace utils
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_MISC_SIGNAL_H__ */
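The Signal utility wraps a single std::function callback, so using it follows the usual function-object pattern. A minimal usage sketch, not part of the patch:

    #include "arm_compute/core/utils/misc/Signal.h"
    #include <iostream>

    void signal_demo()
    {
        arm_compute::utils::signal::Signal<void(int)> on_value;
        on_value.connect([](int v) { std::cout << "received " << v << std::endl; });
        if(on_value.connected())
        {
            on_value(42); // invokes the stored callback
        }
        on_value.disconnect(); // connected() returns false from here on
    }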
diff --git a/arm_compute/runtime/CL/CLScheduler.h b/arm_compute/runtime/CL/CLScheduler.h
index 89c3bc1553..e1e7ff637f 100644
--- a/arm_compute/runtime/CL/CLScheduler.h
+++ b/arm_compute/runtime/CL/CLScheduler.h
@@ -30,7 +30,7 @@
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTuner.h"
+#include "arm_compute/runtime/CL/ICLTuner.h"
#if defined(ARM_COMPUTE_DEBUG_ENABLED)
namespace
@@ -194,17 +194,19 @@ public:
return event;
}
-private:
- /** Tune OpenCL kernel
- *
- * @note This method uses a brute force approach to find the optimal LWS
+ /** Tunes OpenCL kernel
*
* @param[in] kernel Kernel to tune
- *
- * @return The optimal LWS for the specified kernel
*/
- cl::NDRange tune_kernel(ICLKernel &kernel);
+ void tune_kernel_static(ICLKernel &kernel)
+ {
+ if(_cl_tuner != nullptr)
+ {
+ _cl_tuner->tune_kernel_static(kernel);
+ }
+ }
+private:
/** Flag to ensure symbols initialisation is happening before Scheduler creation */
static std::once_flag _initialize_symbols;
diff --git a/arm_compute/runtime/CL/CLTuner.h b/arm_compute/runtime/CL/CLTuner.h
index c1fbfd249c..f789500de3 100644
--- a/arm_compute/runtime/CL/CLTuner.h
+++ b/arm_compute/runtime/CL/CLTuner.h
@@ -99,7 +99,8 @@ public:
void save_to_file(const std::string &filename) const;
// Inherited methods overridden:
- void tune_kernel(ICLKernel &kernel) override;
+ void tune_kernel_static(ICLKernel &kernel) override;
+ void tune_kernel_dynamic(ICLKernel &kernel) override;
/** Is the kernel_event set ?
*
diff --git a/arm_compute/runtime/CL/ICLTuner.h b/arm_compute/runtime/CL/ICLTuner.h
index c71835c0aa..95b22b5b7e 100644
--- a/arm_compute/runtime/CL/ICLTuner.h
+++ b/arm_compute/runtime/CL/ICLTuner.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,11 +34,21 @@ class ICLTuner
public:
/** Virtual destructor */
virtual ~ICLTuner() = default;
- /** Tune OpenCL kernel
+ /** Tune OpenCL kernel statically
+ *
+ * @note Tuning is performed using only kernel and tensor metadata,
+ * thus can be performed when memory is not available
+ *
+ * @param[in] kernel Kernel to tune
+ */
+ virtual void tune_kernel_static(ICLKernel &kernel) = 0;
+ /** Tune OpenCL kernel dynamically
+ *
+ * @note Tuning requires memory to be available on all kernel tensors and objects in order to be performed
*
* @param[in] kernel Kernel to tune
*/
- virtual void tune_kernel(ICLKernel &kernel) = 0;
+ virtual void tune_kernel_dynamic(ICLKernel &kernel) = 0;
};
}
#endif /*__ARM_COMPUTE_ICLTUNER_H__ */
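Any tuner now has to implement both hooks, even when one of them does nothing. A minimal sketch of a conforming implementation; the NullTuner name is hypothetical and not part of the patch:

    #include "arm_compute/runtime/CL/ICLTuner.h"

    // Hypothetical tuner that satisfies the two-method interface without doing any work
    class NullTuner final : public arm_compute::ICLTuner
    {
    public:
        void tune_kernel_static(arm_compute::ICLKernel &kernel) override
        {
            (void)kernel; // static pass: only kernel/tensor metadata is available, nothing to do
        }
        void tune_kernel_dynamic(arm_compute::ICLKernel &kernel) override
        {
            (void)kernel; // dynamic pass: memory is available, still intentionally a no-op
        }
    };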
diff --git a/arm_compute/runtime/CL/tuners/BifrostTuner.h b/arm_compute/runtime/CL/tuners/BifrostTuner.h
new file mode 100644
index 0000000000..080a37c152
--- /dev/null
+++ b/arm_compute/runtime/CL/tuners/BifrostTuner.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TUNERS_BIFROST_TUNER_H__
+#define __ARM_COMPUTE_TUNERS_BIFROST_TUNER_H__
+
+#include "arm_compute/runtime/CL/ICLTuner.h"
+
+namespace arm_compute
+{
+namespace tuners
+{
+/** Bifrost based OpenCL tuner implementation */
+class BifrostTuner final : public ICLTuner
+{
+public:
+ // Inherited overridden methods
+ void tune_kernel_static(ICLKernel &kernel) override;
+ void tune_kernel_dynamic(ICLKernel &kernel) override;
+};
+} // namespace tuners
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_TUNERS_BIFROST_TUNER_H__ */
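For the tuner to take effect it has to be registered with the scheduler. A minimal sketch, assuming CLScheduler::default_init() accepts an optional ICLTuner pointer in the same way the existing CLTuner is installed:

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/tuners/BifrostTuner.h"

    void init_with_bifrost_tuner()
    {
        // static: the scheduler stores a raw pointer, so the tuner must outlive it
        static arm_compute::tuners::BifrostTuner tuner;
        arm_compute::CLScheduler::get().default_init(&tuner); // assumed ICLTuner* overload
        // configure() paths that call tune_kernel_static() now pick up Bifrost LWS hints;
        // enqueue() forwards to tune_kernel_dynamic(), which this tuner leaves as a no-op.
    }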
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index 56ac0c7250..b5526c4fca 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -315,79 +315,6 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
kernel_name << "_f32_bifrost";
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str(), build_options.options()));
-
- // Through extensive experimentation with over 30 representative tensor
- // shapes, we found a small number of local work size configurations
- // that result in nearly optimal execution times. Selecting the right
- // lws for a given shape, however, required a complex decision tree,
- // until we constructed a simple feature as described below.
- //
- // We started from the number of multiply-accumulate operations for a
- // convolution layer, which is equal to the product of the input
- // dimensions 0..2 and the weights dimensions 0..2. Unfortunately,
- // this resulted in ties between distinct shapes that required distinct
- // lws configurations. Replacing the width of the input with the kernel
- // size, however, resulted in nearly optimal predictions. We use underscores
- // in variable names to indicate when they are intentionally misleading.
- const size_t product_of_weights_dimensions = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2);
- const size_t product_of_input_dimensions_ = input->info()->dimension(0) * weights->info()->dimension(1) * input->info()->dimension(2);
- const float mega_ops_ = 1e-6 * product_of_weights_dimensions * product_of_input_dimensions_;
-
- switch(kernel_size)
- {
- case 1:
- {
- if(mega_ops_ < 1.f)
- {
- _lws_hint = cl::NDRange(1, 1, 8);
- }
- else if(mega_ops_ < 7.f)
- {
- _lws_hint = cl::NDRange(1, 1, 4);
- }
- else
- {
- _lws_hint = cl::NDRange(1, 1, 2);
- }
- break;
- }
- case 3:
- {
- if(mega_ops_ < 1.f)
- {
- _lws_hint = cl::NDRange(1, 1, 8);
- }
- else if(mega_ops_ < 13.f)
- {
- _lws_hint = cl::NDRange(2, 1, 4);
- }
- else if(mega_ops_ < 50.f)
- {
- _lws_hint = cl::NDRange(3, 1, 4);
- }
- else
- {
- _lws_hint = cl::NDRange(2, 1, 6);
- }
- break;
- }
- case 5:
- {
- if(mega_ops_ < 2.f || mega_ops_ > 80.f)
- {
- _lws_hint = cl::NDRange(2, 1, 4);
- }
- else
- {
- _lws_hint = cl::NDRange(2, 1, 8);
- }
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Kernel size not optimized for Bifrost");
- }
- }
}
else
{
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 2fe3a90aef..47bd672114 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -30,6 +30,7 @@
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTuner.h"
#include "arm_compute/runtime/Tensor.h"
#include "support/ToolchainSupport.h"
diff --git a/src/runtime/CL/CLScheduler.cpp b/src/runtime/CL/CLScheduler.cpp
index 65292fe837..2a5d836c33 100644
--- a/src/runtime/CL/CLScheduler.cpp
+++ b/src/runtime/CL/CLScheduler.cpp
@@ -52,7 +52,7 @@ void CLScheduler::enqueue(ICLKernel &kernel, bool flush)
if(_cl_tuner != nullptr)
{
// Tune the OpenCL kernel
- _cl_tuner->tune_kernel(kernel);
+ _cl_tuner->tune_kernel_dynamic(kernel);
}
// Run kernel
diff --git a/src/runtime/CL/CLTuner.cpp b/src/runtime/CL/CLTuner.cpp
index df8e255356..17a62ab46e 100644
--- a/src/runtime/CL/CLTuner.cpp
+++ b/src/runtime/CL/CLTuner.cpp
@@ -113,7 +113,12 @@ bool CLTuner::tune_new_kernels() const
return _tune_new_kernels;
}
-void CLTuner::tune_kernel(ICLKernel &kernel)
+void CLTuner::tune_kernel_static(ICLKernel &kernel)
+{
+ ARM_COMPUTE_UNUSED(kernel);
+}
+
+void CLTuner::tune_kernel_dynamic(ICLKernel &kernel)
{
// Get the configuration ID from the kernel
const std::string &config_id = kernel.config_id();
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
index d6a335c1ec..c48865a0cc 100644
--- a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,6 +52,9 @@ void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weig
zero_value = PixelValue(static_cast<uint8_t>(input->info()->quantization_info().offset));
}
_input_border_handler.configure(input, _direct_conv_kernel.border_size(), BorderMode::CONSTANT, zero_value);
+
+ // Tune kernels
+ CLScheduler::get().tune_kernel_static(_direct_conv_kernel);
}
Status CLDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp
new file mode 100644
index 0000000000..c0ebd24afe
--- /dev/null
+++ b/src/runtime/CL/tuners/BifrostTuner.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernels.h"
+#include "arm_compute/core/utils/misc/Cast.h"
+
+namespace arm_compute
+{
+namespace tuners
+{
+namespace
+{
+/** Tunes a @ref CLDirectConvolutionLayerKernel for a Bifrost target
+ *
+ * @param[in] k Kernel to tune
+ */
+void tune_direct_convolution_kernel(CLDirectConvolutionLayerKernel &k)
+{
+ cl::NDRange lws_hint = k.lws_hint();
+
+ const GPUTarget gpu_target = k.get_target();
+ const DataType dt = k._input->info()->data_type();
+ const TensorShape weights_shape = k._weights->info()->tensor_shape();
+ const TensorShape inputs_shape = k._input->info()->tensor_shape();
+ const size_t kernel_size = weights_shape.x();
+ const unsigned int stride_x = k._conv_stride_x;
+ const unsigned int stride_y = k._conv_stride_y;
+
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (kernel_size <= 5) && (stride_x == 1) && (stride_y == 1) && (dt == DataType::F32))
+ {
+ // Through extensive experimentation with over 30 representative tensor
+ // shapes, we found a small number of local work size configurations
+ // that result in nearly optimal execution times. Selecting the right
+ // lws for a given shape, however, required a complex decision tree,
+ // until we constructed a simple feature as described below.
+ //
+ // We started from the number of multiply-accumulate operations for a
+ // convolution layer, which is equal to the product of the input
+ // dimensions 0..2 and the weights dimensions 0..2. Unfortunately,
+ // this resulted in ties between distinct shapes that required distinct
+ // lws configurations. Replacing the width of the input with the kernel
+ // size, however, resulted in nearly optimal predictions. We use underscores
+ // in variable names to indicate when they are intentionally misleading.
+ const size_t product_of_weights_dimensions = weights_shape[0] * weights_shape[1] * weights_shape[2];
+ const size_t product_of_input_dimensions_ = inputs_shape[0] * inputs_shape[1] * inputs_shape[2];
+ const float mega_ops_ = 1e-6 * product_of_weights_dimensions * product_of_input_dimensions_;
+
+ switch(kernel_size)
+ {
+ case 1:
+ {
+ if(mega_ops_ < 1.f)
+ {
+ lws_hint = cl::NDRange(1, 1, 8);
+ }
+ else if(mega_ops_ < 7.f)
+ {
+ lws_hint = cl::NDRange(1, 1, 4);
+ }
+ else
+ {
+ lws_hint = cl::NDRange(1, 1, 2);
+ }
+ break;
+ }
+ case 3:
+ {
+ if(mega_ops_ < 1.f)
+ {
+ lws_hint = cl::NDRange(1, 1, 8);
+ }
+ else if(mega_ops_ < 13.f)
+ {
+ lws_hint = cl::NDRange(2, 1, 4);
+ }
+ else if(mega_ops_ < 50.f)
+ {
+ lws_hint = cl::NDRange(3, 1, 4);
+ }
+ else
+ {
+ lws_hint = cl::NDRange(2, 1, 6);
+ }
+ break;
+ }
+ case 5:
+ {
+ if(mega_ops_ < 2.f || mega_ops_ > 80.f)
+ {
+ lws_hint = cl::NDRange(2, 1, 4);
+ }
+ else
+ {
+ lws_hint = cl::NDRange(2, 1, 8);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ k.set_lws_hint(lws_hint);
+ }
+}
+} // namespace
+
+void BifrostTuner::tune_kernel_static(ICLKernel &kernel)
+{
+ // Dispatch static tuning to the kernel-specific routine if the kernel type is supported
+ if(dynamic_cast<CLDirectConvolutionLayerKernel *>(&kernel) != nullptr)
+ {
+ tune_direct_convolution_kernel(*utils::cast::polymorphic_downcast<CLDirectConvolutionLayerKernel *>(&kernel));
+ }
+}
+
+void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel)
+{
+ ARM_COMPUTE_UNUSED(kernel);
+}
+} // namespace tuners
+} // namespace arm_compute
\ No newline at end of file
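As a worked example of the heuristic above, take the shapes used in the unit test below: a 13x13x16 F32 input with 3x3x16x3 weights at stride 1 on a G72 target. Then product_of_weights_dimensions = 3 * 3 * 16 = 144, product_of_input_dimensions_ = 13 * 13 * 16 = 2704, and mega_ops_ = 1e-6 * 144 * 2704 ≈ 0.39. Since the kernel size is 3 and mega_ops_ < 1, the tuner sets the LWS hint to (1, 1, 8), which is why the test expects the hard-wired hint of 2000 to be replaced after tune_kernel_static().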
diff --git a/tests/validation/CL/UNIT/Tuner.cpp b/tests/validation/CL/UNIT/Tuner.cpp
new file mode 100644
index 0000000000..26d21b54f2
--- /dev/null
+++ b/tests/validation/CL/UNIT/Tuner.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"
+#include "support/ToolchainSupport.h"
+#include "tests/Utils.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(UNIT)
+TEST_SUITE(Tuner)
+
+/** Validates static tuning of Bifrost tuner */
+TEST_CASE(BifrostTunerSimple, framework::DatasetMode::ALL)
+{
+ // Create tuner
+ tuners::BifrostTuner tuner;
+
+ // Create tensors
+ auto src = create_tensor<CLTensor>(TensorShape(13U, 13U, 16U), DataType::F32);
+ auto weights = create_tensor<CLTensor>(TensorShape(3U, 3U, 16U, 3U), DataType::F32);
+ auto bias = create_tensor<CLTensor>(TensorShape(3U), DataType::F32);
+ auto dst = create_tensor<CLTensor>(TensorShape(13U, 13U, 3U), DataType::F32);
+
+ // Create kernel
+ cl::NDRange fake_lws(2000);
+ CLDirectConvolutionLayerKernel conv;
+ conv.set_target(GPUTarget::G72);
+
+ // Hard-wire lws to kernel and validate lws
+ conv.set_lws_hint(fake_lws);
+ ARM_COMPUTE_EXPECT(conv.lws_hint()[0] == 2000, framework::LogLevel::ERRORS);
+
+ // Configure
+ conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1));
+ ARM_COMPUTE_EXPECT(conv.lws_hint()[0] == 2000, framework::LogLevel::ERRORS);
+
+ // Tune kernel and validate
+ tuner.tune_kernel_static(conv);
+ ARM_COMPUTE_EXPECT(conv.lws_hint()[0] != 2000, framework::LogLevel::ERRORS);
+
+ // Clear tuner
+ CLScheduler::get().default_init();
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute