Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CL/functions/CLPoolingLayer.cpp  |  83
-rw-r--r--  src/runtime/CL/tuners/BifrostTuner.cpp        |  12
-rw-r--r--  src/runtime/gpu/cl/operators/ClPooling.cpp    | 101
-rw-r--r--  src/runtime/gpu/cl/operators/ClPooling.h      |  75
4 files changed, 216 insertions, 55 deletions
diff --git a/src/runtime/CL/functions/CLPoolingLayer.cpp b/src/runtime/CL/functions/CLPoolingLayer.cpp
index f3a2dbdd51..fbaec1d2d9 100644
--- a/src/runtime/CL/functions/CLPoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLPoolingLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,13 +23,27 @@
*/
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLPoolingLayerKernel.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClPooling.h"
namespace arm_compute
{
+struct CLPoolingLayer::Impl
+{
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ ICLTensor *indices{ nullptr };
+ std::unique_ptr<opencl::ClPooling> op{ nullptr };
+};
+
+CLPoolingLayer::CLPoolingLayer()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLPoolingLayer::~CLPoolingLayer() = default;
+
void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, pool_info, indices);
@@ -37,56 +51,25 @@ void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const Poolin
void CLPoolingLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- // Configure pooling kernel
- auto k = std::make_unique<CLPoolingLayerKernel>();
- k->set_target(CLScheduler::get().target());
- k->configure(compile_context, input, output, pool_info, indices);
- _kernel = std::move(k);
-
- const DataType data_type = input->info()->data_type();
+ _impl->src = input;
+ _impl->dst = output;
+ _impl->indices = indices;
- // Configure border depending on operation required (quantize border in case of asymmetric data_type)
- BorderMode border_mode{};
- PixelValue pixel_value(0.f);
- if(is_data_type_quantized_asymmetric(data_type) && !pool_info.exclude_padding)
- {
- pixel_value = PixelValue(0, data_type, input->info()->quantization_info());
- }
-
- // Data layout
- const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout;
-
- switch(data_layout)
- {
- case DataLayout::NCHW:
- border_mode = (PoolingType::MAX == pool_info.pool_type) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
- break;
- case DataLayout::NHWC:
- border_mode = BorderMode::CONSTANT;
- if(PoolingType::MAX == pool_info.pool_type)
- {
- if(is_data_type_quantized(data_type))
- {
- std::tie(pixel_value, std::ignore) = get_min_max(data_type);
- }
- else
- {
- pixel_value = PixelValue(std::numeric_limits<float>::lowest());
- }
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
- _border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, pixel_value);
-
- // Tune kernels
- CLScheduler::get().tune_kernel_static(*_kernel);
+ _impl->op = std::make_unique<opencl::ClPooling>();
+ _impl->op->configure(compile_context, input->info(), output->info(), pool_info, (indices) ? indices->info() : nullptr);
}
Status CLPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
{
- return CLPoolingLayerKernel::validate(input, output, pool_info, indices);
+ return opencl::ClPooling::validate(input, output, pool_info, indices);
+}
+
+void CLPoolingLayer::run()
+{
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ pack.add_tensor(TensorType::ACL_DST_0, _impl->dst);
+ pack.add_tensor(TensorType::ACL_DST_1, _impl->indices);
+ _impl->op->run(pack);
}
} // namespace arm_compute
diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp
index 8badd57b9e..7a06de6d1c 100644
--- a/src/runtime/CL/tuners/BifrostTuner.cpp
+++ b/src/runtime/CL/tuners/BifrostTuner.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,6 +27,8 @@
#include "src/core/CL/CLKernels.h"
#include "support/Cast.h"
+#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"
+
namespace arm_compute
{
namespace tuners
@@ -208,7 +210,7 @@ void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
k.set_lws_hint(lws_hint);
}
-void tune_pooling_kernel(CLPoolingLayerKernel &k)
+void tune_pooling_kernel(opencl::kernels::ClPoolingKernel &k)
{
cl::NDRange lws_hint = k.lws_hint();
const GPUTarget gpu_target = k.get_target();
@@ -217,7 +219,7 @@ void tune_pooling_kernel(CLPoolingLayerKernel &k)
// On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
// kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
// invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
- if(k._input->info()->data_layout() == DataLayout::NCHW)
+ if(k._pool_info.data_layout == DataLayout::NCHW)
{
if(gpu_target_is_in(gpu_target,
GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
@@ -279,9 +281,9 @@ void BifrostTuner::tune_kernel_static(ICLKernel &kernel)
{
tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
}
- else if(dynamic_cast<CLPoolingLayerKernel *>(&kernel) != nullptr)
+ else if(dynamic_cast<opencl::kernels::ClPoolingKernel *>(&kernel) != nullptr)
{
- tune_pooling_kernel(*utils::cast::polymorphic_downcast<CLPoolingLayerKernel *>(&kernel));
+ tune_pooling_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClPoolingKernel *>(&kernel));
}
else if(dynamic_cast<CLScaleKernel *>(&kernel) != nullptr)
{
diff --git a/src/runtime/gpu/cl/operators/ClPooling.cpp b/src/runtime/gpu/cl/operators/ClPooling.cpp
new file mode 100644
index 0000000000..8610eb9842
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClPooling.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClPooling.h"
+
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+void ClPooling::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, ITensorInfo *indices)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ // Configure pooling kernel
+ auto k = std::make_unique<kernels::ClPoolingKernel>();
+ k->set_target(CLScheduler::get().target());
+ k->configure(compile_context, src, dst, info, indices);
+ _pooling = std::move(k);
+
+ const DataType data_type = src->data_type();
+
+ // Configure border depending on operation required (quantize border in case of asymmetric data_type)
+ BorderMode border_mode{};
+ PixelValue pixel_value(0.f);
+ if(is_data_type_quantized_asymmetric(data_type) && !info.exclude_padding)
+ {
+ pixel_value = PixelValue(0, data_type, src->quantization_info());
+ }
+
+ // Data layout
+ const auto data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
+
+ switch(data_layout)
+ {
+ case DataLayout::NCHW:
+ border_mode = (PoolingType::MAX == info.pool_type) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
+ break;
+ case DataLayout::NHWC:
+ border_mode = BorderMode::CONSTANT;
+ if(PoolingType::MAX == info.pool_type)
+ {
+ if(is_data_type_quantized(data_type))
+ {
+ std::tie(pixel_value, std::ignore) = get_min_max(data_type);
+ }
+ else
+ {
+ pixel_value = PixelValue(std::numeric_limits<float>::lowest());
+ }
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data layout not supported");
+ }
+ auto b = std::make_unique<CLFillBorderKernel>();
+ b->configure(compile_context, src, _pooling->border_size(), border_mode, pixel_value);
+ _border_handler = std::move(b);
+
+ // Tune kernels
+ CLScheduler::get().tune_kernel_static(*_pooling);
+}
+
+Status ClPooling::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices)
+{
+ return kernels::ClPoolingKernel::validate(src, dst, info, indices);
+}
+
+void ClPooling::run(ITensorPack &tensors)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
+
+ CLScheduler::get().enqueue_op(*_border_handler.get(), tensors, false);
+ CLScheduler::get().enqueue_op(*_pooling.get(), tensors, false);
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClPooling.h b/src/runtime/gpu/cl/operators/ClPooling.h
new file mode 100644
index 0000000000..99de6d0dcf
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClPooling.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_POOLING_H
+#define ARM_COMPUTE_CL_POOLING_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLFillBorderKernel (executed if padding size is different from zero)
+ * -# @ref opencl::kernels::ClPoolingKernel
+ */
+class ClPooling : public IClOperator
+{
+public:
+ /** Constructor */
+ ClPooling() = default;
+ /** Configure operator for a given list of arguments
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[out] dst Destination tensor info. Data type supported: same as @p src
+ * @param[in] info Pooling layer parameters.
+ * @param[out] indices (optional) The indices info of the maximal values. Data type supported: U32.
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, ITensorInfo *indices = nullptr);
+ /** Static function to check if given info will lead to a valid configuration of @ref ClPooling
+ *
+ * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] dst Destination tensor info. Data type supported: same as @p src
+ * @param[in] info Pooling layer parameters.
+ * @param[in] indices (optional) The indices info of the maximal values. Data type supported: U32.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices = nullptr);
+
+ // Inherited method overridden
+ void run(ITensorPack &tensors) override;
+
+private:
+ std::unique_ptr<ICLKernel> _pooling{ nullptr };
+ std::unique_ptr<ICLKernel> _border_handler{ nullptr };
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_POOLING_H */
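
For context, a minimal usage sketch of the refactored public API (not part of the patch; shapes and pooling parameters are hypothetical). CLPoolingLayer::configure() now forwards the tensor infos to the opencl::ClPooling operator, and CLPoolingLayer::run() wraps src/dst/indices in an ITensorPack, as shown in the CLPoolingLayer.cpp hunk above.

// Minimal sketch, assuming an OpenCL device is initialised via
// CLScheduler::get().default_init(). Shapes and parameters are illustrative only.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Source tensor: 32x32 spatial, 16 channels, FP32, default NCHW layout.
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));

    // 2x2 max pooling with stride 2; the destination info is auto-initialised during configure().
    const PoolingLayerInfo pool_info(PoolingType::MAX, 2, DataLayout::NCHW, PadStrideInfo(2, 2, 0, 0));

    CLPoolingLayer pool;
    pool.configure(&src, &dst, pool_info); // builds the opencl::ClPooling operator internally

    src.allocator()->allocate();
    dst.allocator()->allocate();

    pool.run(); // packs src/dst into an ITensorPack and enqueues the border + pooling kernels
    CLScheduler::get().sync();
    return 0;
}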