aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/CL/functions
diff options
context:
space:
mode:
authorSheri Zhang <sheri.zhang@arm.com>2021-02-02 11:49:34 +0000
committerSheri Zhang <sheri.zhang@arm.com>2021-02-08 13:35:26 +0000
commit7e20e29904c98adae5a91c6492fd78da88b7a9bf (patch)
tree96309359fd28c2244984ed1d4d1a9069528b64dc /src/runtime/CL/functions
parentafc9c3df7600dcecf12d3d3a4686d2008502a813 (diff)
downloadComputeLibrary-7e20e29904c98adae5a91c6492fd78da88b7a9bf.tar.gz
Make memset/copy functions state-less
Port following functions: - CLCopy - CLFill - CLPermute - CLReshapeLayer - CLCropResize Resolves: COMPMID-4002 Signed-off-by: Sheri Zhang <sheri.zhang@arm.com> Change-Id: I8392aa515aaeb5b44dab6122be6a795d08376d5f Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5003 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/CL/functions')
-rw-r--r--src/runtime/CL/functions/CLCopy.cpp56
-rw-r--r--src/runtime/CL/functions/CLCrop.cpp82
-rw-r--r--src/runtime/CL/functions/CLCropResize.cpp46
-rw-r--r--src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp9
-rw-r--r--src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLFFTConvolutionLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLFill.cpp50
-rw-r--r--src/runtime/CL/functions/CLGenerateProposalsLayer.cpp19
-rw-r--r--src/runtime/CL/functions/CLLSTMLayer.cpp37
-rw-r--r--src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp11
-rw-r--r--src/runtime/CL/functions/CLPadLayer.cpp11
-rw-r--r--src/runtime/CL/functions/CLPermute.cpp47
-rw-r--r--src/runtime/CL/functions/CLQLSTMLayer.cpp11
-rw-r--r--src/runtime/CL/functions/CLRNNLayer.cpp11
-rw-r--r--src/runtime/CL/functions/CLReshapeLayer.cpp35
-rw-r--r--src/runtime/CL/functions/CLSpaceToBatchLayer.cpp16
16 files changed, 300 insertions, 147 deletions
diff --git a/src/runtime/CL/functions/CLCopy.cpp b/src/runtime/CL/functions/CLCopy.cpp
index c3e30ada6e..98916bf38a 100644
--- a/src/runtime/CL/functions/CLCopy.cpp
+++ b/src/runtime/CL/functions/CLCopy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,30 +23,58 @@
*/
#include "arm_compute/runtime/CL/functions/CLCopy.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/PixelValue.h"
-#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
-#include "src/core/CL/kernels/CLCopyKernel.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClCopy.h"
#include <utility>
-using namespace arm_compute;
+namespace arm_compute
+{
+struct CLCopy::Impl
+{
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClCopy> op{ nullptr };
+};
+
+CLCopy::CLCopy()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLCopy::CLCopy(CLCopy &&) = default;
+CLCopy &CLCopy::operator=(CLCopy &&) = default;
+CLCopy::~CLCopy() = default;
-void CLCopy::configure(ICLTensor *input, ICLTensor *output)
+void CLCopy::configure(ICLTensor *input, ICLTensor *output, Window *dst_window)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, output);
+ configure(CLKernelLibrary::get().get_compile_context(), input, output, dst_window);
+}
+
+void CLCopy::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, Window *dst_window)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+
+ _impl->src = input;
+ _impl->dst = output;
+
+ _impl->op = std::make_unique<opencl::ClCopy>();
+ _impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), dst_window);
}
-void CLCopy::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output)
+Status CLCopy::validate(const ITensorInfo *input, const ITensorInfo *output, Window *dst_window)
{
- auto k = std::make_unique<CLCopyKernel>();
- k->configure(compile_context, input, output);
- _kernel = std::move(k);
+ return opencl::ClCopy::validate(input, output, dst_window);
}
-Status CLCopy::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output)
+void CLCopy::run()
{
- return CLCopyKernel::validate(input, output);
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+ _impl->op->run(pack);
}
+} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLCrop.cpp b/src/runtime/CL/functions/CLCrop.cpp
new file mode 100644
index 0000000000..20cab4df5f
--- /dev/null
+++ b/src/runtime/CL/functions/CLCrop.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLCrop.h"
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClCrop.h"
+
+#include <utility>
+
+namespace arm_compute
+{
+struct CLCrop::Impl
+{
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClCrop> op{ nullptr };
+};
+
+CLCrop::CLCrop()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLCrop::CLCrop(CLCrop &&) = default;
+CLCrop &CLCrop::operator=(CLCrop &&) = default;
+CLCrop::~CLCrop() = default;
+
+void CLCrop::configure(const ICLTensor *src, ICLTensor *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value,
+ Window *dst_window)
+{
+ configure(CLKernelLibrary::get().get_compile_context(), src, dst, start, end, batch_index, extrapolation_value, dst_window);
+}
+
+void CLCrop::configure(const CLCompileContext &compile_context, const ICLTensor *src, ICLTensor *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value,
+ Window *dst_window)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+ _impl->src = src;
+ _impl->dst = dst;
+
+ _impl->op = std::make_unique<opencl::ClCrop>();
+ _impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), start, end, batch_index, extrapolation_value, dst_window);
+}
+
+Status CLCrop::validate(const ITensorInfo *input, const ITensorInfo *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *dst_window)
+{
+ return opencl::ClCrop::validate(input, output, start, end, batch_index, extrapolation_value, dst_window);
+}
+
+void CLCrop::run()
+{
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+ _impl->op->run(pack);
+}
+} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLCropResize.cpp b/src/runtime/CL/functions/CLCropResize.cpp
index ed31446cf9..77c44d539b 100644
--- a/src/runtime/CL/functions/CLCropResize.cpp
+++ b/src/runtime/CL/functions/CLCropResize.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,10 +25,7 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLCopyKernel.h"
-#include "src/core/CL/kernels/CLCropKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLMemsetKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -59,7 +56,7 @@ inline void configure_crop(const ICLTensor *input, ICLTensor *crop_boxes, ICLTen
} // namespace
CLCropResize::CLCropResize()
- : _input(nullptr), _boxes(nullptr), _box_ind(nullptr), _output(nullptr), _num_boxes(0), _method(), _extrapolation_value(0), _scale(), _copy(), _crop_results(), _scaled_results(), _internal_kernels()
+ : _input(nullptr), _boxes(nullptr), _box_ind(nullptr), _output(nullptr), _num_boxes(0), _method(), _extrapolation_value(0), _scale(), _copy(), _crop_results(), _scaled_results(), _internal_functions()
{
}
@@ -73,7 +70,7 @@ Status CLCropResize::validate(const ITensorInfo *input, ITensorInfo *boxes, ITen
ARM_COMPUTE_RETURN_ERROR_ON(boxes->tensor_shape()[0] != 4);
ARM_COMPUTE_RETURN_ERROR_ON(boxes->tensor_shape()[1] != box_ind->tensor_shape()[0]);
TensorInfo temp_info;
- ARM_COMPUTE_RETURN_ON_ERROR(CLCropKernel::validate(input->clone().get(), &temp_info, { 0, 0 }, { 1, 1 }, input->dimension(3) - 1, extrapolation_value));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCrop::validate(input->clone().get(), &temp_info, { 0, 0 }, { 1, 1 }, input->dimension(3) - 1, extrapolation_value));
if(output->total_size() > 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(output, DataType::F32);
@@ -111,11 +108,11 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
// For each crop box:
// - The initial cropped image is produced as specified by boxes[i] from the 3D image input[box_ind[i]].
- // Possibly using a CLCropKernel and up to four CLMemsetKernels.
+ // Possibly using a CLCrop and up to four CLFills.
// - A tensor is required to hold this initial cropped image.
// - A scale function is used to resize the cropped image to the size specified by crop_size.
// - A tensor is required to hold the final scaled image before it is copied into the 4D output
- // that will hold all final cropped and scaled 3D images using CLCopyKernel.
+ // that will hold all final cropped and scaled 3D images using CLCopy.
// The contents of _boxes and _box_ind are required to calculate the shape
// of the initial cropped image and thus are required to configure the
@@ -149,7 +146,7 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
Window win = calculate_max_window(*_output->info());
win.set(3, Window::Dimension(num_box, num_box + 1, 1));
- auto copy_kernel = std::make_unique<CLCopyKernel>();
+ auto copy_kernel = std::make_unique<CLCopy>();
copy_kernel->configure(compile_context, _scaled_results[num_box].get(), _output, &win);
_copy.emplace_back(std::move(copy_kernel));
@@ -207,9 +204,10 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
{
Window slice_fill_rows_before(full_window);
slice_fill_rows_before.set(2, Window::Dimension(0, rows_out_of_bounds[0], 1));
- auto kernel = std::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLFill>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_rows_before);
- _internal_kernels.push_back(std::move(kernel));
+ //_internal_functions.emplace_back(std::move(kernel));
+ _internal_functions.push_back(std::move(kernel));
}
Window slice_in(full_window);
@@ -224,18 +222,20 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
{
Window slice_fill_cols_before(slice_in);
slice_fill_cols_before.set(1, Window::Dimension(0, cols_out_of_bounds[0], 1));
- auto kernel = std::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLFill>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_cols_before);
- _internal_kernels.push_back(std::move(kernel));
+ //_internal_functions.emplace_back(std::move(kernel));
+ _internal_functions.push_back(std::move(kernel));
}
if(cols_out_of_bounds[1] > 0)
{
Window slice_fill_cols_after(slice_in);
slice_fill_cols_after.set(1, Window::Dimension(_crop_results[num_box].get()->info()->dimension(1) - cols_out_of_bounds[1], _crop_results[num_box].get()->info()->dimension(1), 1));
- auto kernel = std::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLFill>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_cols_after);
- _internal_kernels.push_back(std::move(kernel));
+ //_internal_functions.emplace_back(std::move(kernel));
+ _internal_functions.push_back(std::move(kernel));
}
// Copy all elements within the input bounds from the input tensor.
@@ -246,10 +246,11 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
is_height_flipped ? start[1] - rows_out_of_bounds[0] : start[1] + rows_out_of_bounds[0] };
Coordinates2D end_in{ is_width_flipped ? start_in.x - cols_in_bounds + 1 : start_in.x + cols_in_bounds - 1,
is_height_flipped ? start_in.y - rows_in_bounds + 1 : start_in.y + rows_in_bounds - 1 };
- auto kernel = std::make_unique<CLCropKernel>();
+ auto kernel = std::make_unique<CLCrop>();
kernel->configure(compile_context, _input, _crop_results[num_box].get(), start_in, end_in, batch_index, extrapolation_value, &slice_in);
- _internal_kernels.push_back(std::move(kernel));
+ //_internal_functions.emplace_back(std::move(kernel));
+ _internal_functions.push_back(std::move(kernel));
}
}
@@ -258,9 +259,10 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
{
Window slice_fill_rows_after(full_window);
slice_fill_rows_after.set(2, Window::Dimension(_crop_results[num_box].get()->info()->dimension(2) - rows_out_of_bounds[1], _crop_results[num_box].get()->info()->dimension(2), 1));
- auto kernel = std::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLFill>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_rows_after);
- _internal_kernels.push_back(std::move(kernel));
+ //_internal_functions.emplace_back(std::move(kernel));
+ _internal_functions.push_back(std::move(kernel));
}
}
_boxes->unmap(CLScheduler::get().queue());
@@ -272,9 +274,9 @@ void CLCropResize::run()
{
ARM_COMPUTE_ERROR_ON_MSG(_output == nullptr, "Unconfigured function");
- for(unsigned int i = 0; i < _internal_kernels.size(); ++i)
+ for(unsigned int i = 0; i < _internal_functions.size(); ++i)
{
- CLScheduler::get().enqueue(*(_internal_kernels[i]));
+ _internal_functions[i]->run();
}
CLScheduler::get().sync();
@@ -285,7 +287,7 @@ void CLCropResize::run()
CLScheduler::get().sync();
for(auto &kernel : _copy)
{
- CLScheduler::get().enqueue(*kernel, true);
+ kernel->run();
}
CLScheduler::get().sync();
}
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp b/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
index 4989f6460d..c371558f30 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,13 +28,12 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
-#include "src/core/CL/kernels/CLMemsetKernel.h"
namespace arm_compute
{
CLDeconvolutionLayerUpsample::CLDeconvolutionLayerUpsample() // NOLINT
: _upsample(std::make_unique<CLDeconvolutionLayerUpsampleKernel>()),
- _memset(std::make_unique<CLMemsetKernel>()),
+ _fill(),
_output(nullptr)
{
}
@@ -56,13 +55,13 @@ void CLDeconvolutionLayerUpsample::configure(const CLCompileContext &compile_con
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
_output = output;
- _memset->configure(compile_context, _output, PixelValue(0, _output->info()->data_type(), _output->info()->quantization_info()));
+ _fill.configure(compile_context, _output, PixelValue(0, _output->info()->data_type(), _output->info()->quantization_info()));
_upsample->configure(compile_context, input, _output, info);
}
void CLDeconvolutionLayerUpsample::run()
{
- CLScheduler::get().enqueue(*_memset, false);
+ _fill.run();
CLScheduler::get().enqueue(*_upsample, true);
}
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
index 0e3109439e..d802ef2dc9 100644
--- a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,6 @@
#include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
-#include "src/core/CL/kernels/CLMemsetKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
diff --git a/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
index 45e74df703..41b02d03f2 100644
--- a/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,7 +29,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CPP/CPPScheduler.h"
-#include "src/core/CL/kernels/CLCopyKernel.h"
#include "src/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "src/core/CL/kernels/CLFFTRadixStageKernel.h"
#include "src/core/CL/kernels/CLFFTScaleKernel.h"
diff --git a/src/runtime/CL/functions/CLFill.cpp b/src/runtime/CL/functions/CLFill.cpp
index 30843d8cc0..b22d79fea4 100644
--- a/src/runtime/CL/functions/CLFill.cpp
+++ b/src/runtime/CL/functions/CLFill.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,22 +23,56 @@
*/
#include "arm_compute/runtime/CL/functions/CLFill.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Types.h"
-#include "src/core/CL/kernels/CLMemsetKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClFill.h"
#include <utility>
namespace arm_compute
{
-void CLFill::configure(ICLTensor *tensor, PixelValue constant_value)
+struct CLFill::Impl
{
- configure(CLKernelLibrary::get().get_compile_context(), tensor, constant_value);
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClFill> op{ nullptr };
+};
+
+CLFill::CLFill()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLFill::CLFill(CLFill &&) = default;
+CLFill &CLFill::operator=(CLFill &&) = default;
+CLFill::~CLFill() = default;
+
+void CLFill::configure(ICLTensor *tensor, const PixelValue &constant_value, Window *dst_window)
+{
+ configure(CLKernelLibrary::get().get_compile_context(), tensor, constant_value, dst_window);
+}
+
+void CLFill::configure(const CLCompileContext &compile_context, ICLTensor *tensor, const PixelValue &constant_value, Window *dst_window)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+
+ _impl->src = tensor;
+
+ _impl->op = std::make_unique<opencl::ClFill>();
+ _impl->op->configure(compile_context, _impl->src->info(), constant_value, dst_window);
+}
+
+Status CLFill::validate(const ITensorInfo *tensor, const PixelValue &constant_value, Window *dst_window)
+{
+ return opencl::ClFill::validate(tensor, constant_value, dst_window);
}
-void CLFill::configure(const CLCompileContext &compile_context, ICLTensor *tensor, PixelValue constant_value)
+void CLFill::run()
{
- auto k = std::make_unique<CLMemsetKernel>();
- k->configure(compile_context, tensor, constant_value);
- _kernel = std::move(k);
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ _impl->op->run(pack);
}
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
index e536816f97..365f95243f 100644
--- a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
+++ b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,7 +29,6 @@
#include "src/core/CL/kernels/CLDequantizationLayerKernel.h"
#include "src/core/CL/kernels/CLGenerateProposalsLayerKernel.h"
#include "src/core/CL/kernels/CLPadLayerKernel.h"
-#include "src/core/CL/kernels/CLPermuteKernel.h"
#include "src/core/CL/kernels/CLQuantizationLayerKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
@@ -37,9 +36,9 @@ namespace arm_compute
{
CLGenerateProposalsLayer::CLGenerateProposalsLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(memory_manager),
- _permute_deltas_kernel(std::make_unique<CLPermuteKernel>()),
+ _permute_deltas(),
_flatten_deltas(),
- _permute_scores_kernel(std::make_unique<CLPermuteKernel>()),
+ _permute_scores(),
_flatten_scores(),
_compute_anchors_kernel(std::make_unique<CLComputeAllAnchorsKernel>()),
_bounding_box_kernel(std::make_unique<CLBoundingBoxTransformKernel>()),
@@ -110,7 +109,7 @@ void CLGenerateProposalsLayer::configure(const CLCompileContext &compile_context
if(!_is_nhwc)
{
_memory_group.manage(&_deltas_permuted);
- _permute_deltas_kernel->configure(compile_context, deltas, &_deltas_permuted, PermutationVector{ 2, 0, 1 });
+ _permute_deltas.configure(compile_context, deltas, &_deltas_permuted, PermutationVector{ 2, 0, 1 });
_flatten_deltas.configure(compile_context, &_deltas_permuted, &_deltas_flattened);
_deltas_permuted.allocator()->allocate();
}
@@ -127,7 +126,7 @@ void CLGenerateProposalsLayer::configure(const CLCompileContext &compile_context
if(!_is_nhwc)
{
_memory_group.manage(&_scores_permuted);
- _permute_scores_kernel->configure(compile_context, scores, &_scores_permuted, PermutationVector{ 2, 0, 1 });
+ _permute_scores.configure(compile_context, scores, &_scores_permuted, PermutationVector{ 2, 0, 1 });
_flatten_scores.configure(compile_context, &_scores_permuted, &_scores_flattened);
_scores_permuted.allocator()->allocate();
}
@@ -244,8 +243,8 @@ Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITens
}
else
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLPermuteKernel::validate(deltas, &deltas_permuted_info, PermutationVector{ 2, 0, 1 }));
- ARM_COMPUTE_RETURN_ON_ERROR(CLPermuteKernel::validate(scores, &scores_permuted_info, PermutationVector{ 2, 0, 1 }));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(deltas, &deltas_permuted_info, PermutationVector{ 2, 0, 1 }));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(scores, &scores_permuted_info, PermutationVector{ 2, 0, 1 }));
}
TensorInfo deltas_flattened_info(deltas->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true));
@@ -356,8 +355,8 @@ void CLGenerateProposalsLayer::run()
// Transpose and reshape the inputs
if(!_is_nhwc)
{
- CLScheduler::get().enqueue(*_permute_deltas_kernel, false);
- CLScheduler::get().enqueue(*_permute_scores_kernel, false);
+ _permute_deltas.run();
+ _permute_scores.run();
}
_flatten_deltas.run();
_flatten_scores.run();
diff --git a/src/runtime/CL/functions/CLLSTMLayer.cpp b/src/runtime/CL/functions/CLLSTMLayer.cpp
index 77df917119..5036126aea 100644
--- a/src/runtime/CL/functions/CLLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,7 +29,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLCopyKernel.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
@@ -42,7 +41,6 @@
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
-#include "src/core/CL/kernels/CLMemsetKernel.h"
#include "src/core/CL/kernels/CLTransposeKernel.h"
namespace arm_compute
@@ -55,15 +53,14 @@ CLLSTMLayer::CLLSTMLayer(std::shared_ptr<IMemoryManager> memory_manager)
_fully_connected_forget_gate(), _accum_forget_gate1(), _pixelwise_mul_forget_gate(), _activation_forget_gate(), _fully_connected_cell_state(), _gemm_cell_state1(),
_transpose_cell_state(std::make_unique<CLTransposeKernel>()), _accum_cell_state1(), _accum_cell_state2(), _pixelwise_mul_cell_state1(), _activation_cell_state(), _cell_clip(),
_pixelwise_mul_cell_state2(), _fully_connected_output(), _pixelwise_mul_output_state1(), _accum_output1(), _activation_output(), _activation_output_state(), _pixelwise_mul_output_state2(),
- _fully_connected_output_state(), _projection_clip(), _copy_cell_state(std::make_unique<CLCopyKernel>()), _copy_output(std::make_unique<CLCopyKernel>()), _concat_scratch_buffer(),
- _concat_inputs_forget_gate(), _concat_weights_forget_gate(), _concat_weights_input_gate(), _concat_weights_output(), _ones_memset_kernel(std::make_unique<CLMemsetKernel>()),
- _mean_std_norm_input_gate(), _pixelwise_mul_input_gate_coeff(), _accum_input_gate_bias(), _mean_std_norm_forget_gate(), _pixelwise_mul_forget_gate_coeff(), _accum_forget_gate_bias(),
- _mean_std_norm_cell_gate(), _pixelwise_mul_cell_gate_coeff(), _accum_cell_gate_bias(), _mean_std_norm_output_gate(), _pixelwise_mul_output_gate_coeff(), _accum_output_gate_bias(), _input_gate_out1(),
- _input_gate_out2(), _input_gate_out3(), _input_gate_out4(), _forget_gate_out1(), _forget_gate_out2(), _forget_gate_out3(), _forget_gate_out4(), _forget_gate_out5(), _forget_gate_out6(),
- _cell_state_out1(), _cell_state_out2(), _cell_state_out3(), _cell_state_out4(), _cell_state_out5(), _output1(), _output2(), _output3(), _output4(), _cell_state_activation(), _output_state1(), _ones(),
- _input_layer_norm_out1(), _input_layer_norm_out2(), _forget_layer_norm_out1(), _forget_layer_norm_out2(), _cell_layer_norm_out1(), _cell_layer_norm_out2(), _output_layer_norm_out1(),
- _output_layer_norm_out2(), _run_peephole_opt(false), _run_cifg_opt(false), _perform_cell_clipping(false), _has_projection_weights(false), _perform_projection_clipping(false), _is_prepared(false),
- _is_layer_norm_lstm(false)
+ _fully_connected_output_state(), _projection_clip(), _copy_cell_state(), _copy_output(), _concat_scratch_buffer(), _concat_inputs_forget_gate(), _concat_weights_forget_gate(),
+ _concat_weights_input_gate(), _concat_weights_output(), _ones_fill(), _mean_std_norm_input_gate(), _pixelwise_mul_input_gate_coeff(), _accum_input_gate_bias(), _mean_std_norm_forget_gate(),
+ _pixelwise_mul_forget_gate_coeff(), _accum_forget_gate_bias(), _mean_std_norm_cell_gate(), _pixelwise_mul_cell_gate_coeff(), _accum_cell_gate_bias(), _mean_std_norm_output_gate(),
+ _pixelwise_mul_output_gate_coeff(), _accum_output_gate_bias(), _input_gate_out1(), _input_gate_out2(), _input_gate_out3(), _input_gate_out4(), _forget_gate_out1(), _forget_gate_out2(),
+ _forget_gate_out3(), _forget_gate_out4(), _forget_gate_out5(), _forget_gate_out6(), _cell_state_out1(), _cell_state_out2(), _cell_state_out3(), _cell_state_out4(), _cell_state_out5(), _output1(),
+ _output2(), _output3(), _output4(), _cell_state_activation(), _output_state1(), _ones(), _input_layer_norm_out1(), _input_layer_norm_out2(), _forget_layer_norm_out1(), _forget_layer_norm_out2(),
+ _cell_layer_norm_out1(), _cell_layer_norm_out2(), _output_layer_norm_out1(), _output_layer_norm_out2(), _run_peephole_opt(false), _run_cifg_opt(false), _perform_cell_clipping(false),
+ _has_projection_weights(false), _perform_projection_clipping(false), _is_prepared(false), _is_layer_norm_lstm(false)
{
}
@@ -190,7 +187,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe
{
_memory_group.manage(&_input_gate_out1);
_ones.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
- _ones_memset_kernel->configure(compile_context, &_ones, PixelValue(1, _ones.info()->data_type()));
+ _ones_fill.configure(compile_context, &_ones, PixelValue(1, _ones.info()->data_type()));
_subtract_input_gate.configure(compile_context, &_ones, forget_gate_out, &_input_gate_out1, ConvertPolicy::SATURATE);
_ones.allocator()->allocate();
_run_cifg_opt = true;
@@ -385,8 +382,8 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe
}
// Copy cell state and output
- _copy_cell_state->configure(compile_context, &_cell_state_out1, cell_state_out);
- _copy_output->configure(compile_context, output_state_out, output);
+ _copy_cell_state.configure(compile_context, &_cell_state_out1, cell_state_out);
+ _copy_output.configure(compile_context, output_state_out, output);
// Vector for holding the tensors to store in scratch buffer
std::vector<const ICLTensor *> scratch_inputs;
@@ -618,8 +615,8 @@ Status CLLSTMLayer::validate(const ITensorInfo *input,
}
// Validate copy kernel
- ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(&cell_state_tmp, cell_state_out));
- ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(output_state_out, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCopy::validate(&cell_state_tmp, cell_state_out));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCopy::validate(output_state_out, output));
// Validate scratch concatenation
std::vector<const ITensorInfo *> inputs_vector_info_raw;
@@ -660,7 +657,7 @@ void CLLSTMLayer::run()
if(_run_cifg_opt)
{
- CLScheduler::get().enqueue(*_ones_memset_kernel);
+ _ones_fill.run();
_subtract_input_gate.run();
}
else
@@ -729,8 +726,8 @@ void CLLSTMLayer::run()
}
}
- CLScheduler::get().enqueue(*_copy_cell_state);
- CLScheduler::get().enqueue(*_copy_output);
+ _copy_cell_state.run();
+ _copy_output.run();
_concat_scratch_buffer.run();
}
diff --git a/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp b/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp
index c9deb301ef..52151cdfe1 100644
--- a/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,12 +28,11 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h"
-#include "src/core/CL/kernels/CLMemsetKernel.h"
namespace arm_compute
{
CLMaxUnpoolingLayer::CLMaxUnpoolingLayer()
- : _memset_kernel(std::make_unique<CLMemsetKernel>()),
+ : _fill(),
_unpooling_layer_kernel(std::make_unique<CLMaxUnpoolingLayerKernel>())
{
}
@@ -48,7 +47,7 @@ void CLMaxUnpoolingLayer::configure(ICLTensor *input, ICLTensor *indices, ICLTen
void CLMaxUnpoolingLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *indices, ICLTensor *output, const PoolingLayerInfo &pool_info)
{
const PixelValue zero_value(0.f);
- _memset_kernel->configure(output, zero_value);
+    _fill.configure(compile_context, output, zero_value);
_unpooling_layer_kernel->configure(compile_context, input, indices, output, pool_info);
}
@@ -60,8 +59,8 @@ Status CLMaxUnpoolingLayer::validate(const ITensorInfo *input, const ITensorInfo
void CLMaxUnpoolingLayer::run()
{
- // Run memset
- CLScheduler::get().enqueue(*_memset_kernel, false);
+ // Run fill
+ _fill.run();
// Run max unpooling layer
CLScheduler::get().enqueue(*_unpooling_layer_kernel);
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
index 8c5d529117..d105c0597c 100644
--- a/src/runtime/CL/functions/CLPadLayer.cpp
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,14 +22,13 @@
* SOFTWARE.
*/
#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
-#include "src/core/CL/kernels/CLCopyKernel.h"
#include "src/core/CL/kernels/CLPadLayerKernel.h"
namespace arm_compute
{
CLPadLayer::CLPadLayer()
: _pad_kernel(std::make_unique<CLPadLayerKernel>()),
- _copy_kernel(std::make_unique<CLCopyKernel>()),
+ _copy(),
_perform_pad(false)
{
}
@@ -57,7 +56,7 @@ void CLPadLayer::configure(const CLCompileContext &compile_context, ICLTensor *i
else
{
// Copy the input to the whole output if no padding is applied
- _copy_kernel->configure(compile_context, input, output);
+ _copy.configure(compile_context, input, output);
}
}
Status CLPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
@@ -73,7 +72,7 @@ Status CLPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output,
}
else
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(input, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCopy::validate(input, output));
}
return Status{};
}
@@ -85,7 +84,7 @@ void CLPadLayer::run()
}
else
{
- CLScheduler::get().enqueue(*_copy_kernel);
+ _copy.run();
}
}
} // namespace arm_compute \ No newline at end of file
diff --git a/src/runtime/CL/functions/CLPermute.cpp b/src/runtime/CL/functions/CLPermute.cpp
index 31b152c553..eeb0169241 100644
--- a/src/runtime/CL/functions/CLPermute.cpp
+++ b/src/runtime/CL/functions/CLPermute.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,12 +23,30 @@
*/
#include "arm_compute/runtime/CL/functions/CLPermute.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "src/core/CL/kernels/CLPermuteKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClPermute.h"
namespace arm_compute
{
+struct CLPermute::Impl
+{
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClPermute> op{ nullptr };
+};
+
+CLPermute::CLPermute()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLPermute::CLPermute(CLPermute &&) = default;
+CLPermute &CLPermute::operator=(CLPermute &&) = default;
+CLPermute::~CLPermute() = default;
+
void CLPermute::configure(const ICLTensor *input, ICLTensor *output, const PermutationVector &perm)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, perm);
@@ -36,14 +54,25 @@ void CLPermute::configure(const ICLTensor *input, ICLTensor *output, const Permu
void CLPermute::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PermutationVector &perm)
{
- auto k = std::make_unique<CLPermuteKernel>();
- k->configure(compile_context, input, output, perm);
- _kernel = std::move(k);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ _impl->src = input;
+ _impl->dst = output;
+
+ _impl->op = std::make_unique<opencl::ClPermute>();
+ _impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), perm);
}
Status CLPermute::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLPermuteKernel::validate(input, output, perm));
- return Status{};
+ return opencl::ClPermute::validate(input, output, perm);
+}
+
+void CLPermute::run()
+{
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+ _impl->op->run(pack);
}
 } // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLQLSTMLayer.cpp b/src/runtime/CL/functions/CLQLSTMLayer.cpp
index 4395a39060..e7a0e5765e 100644
--- a/src/runtime/CL/functions/CLQLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLQLSTMLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,6 @@
#include "arm_compute/core/utils/misc/InfoHelpers.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLCopyKernel.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
@@ -106,7 +105,7 @@ CLQLSTMLayer::CLQLSTMLayer(std::shared_ptr<IMemoryManager> memory_manager)
_recurrent_to_output_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
_projection_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
_layer_norms(),
- _copy_output(std::make_unique<CLCopyKernel>())
+ _copy_output()
{
for(auto &norm : _layer_norms)
{
@@ -593,7 +592,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT
}
// Copy output_state_out to output
- _copy_output->configure(compile_context, output_state_out, output);
+ _copy_output.configure(compile_context, output_state_out, output);
}
Status CLQLSTMLayer::validate(const ITensorInfo *input,
@@ -957,7 +956,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output_state_in, output_state_out);
}
- ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(output_state_out, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCopy::validate(output_state_out, output));
return Status{};
}
@@ -1099,7 +1098,7 @@ void CLQLSTMLayer::run()
}
// Copy output_state_out to output
- CLScheduler::get().enqueue(*_copy_output);
+ _copy_output.run();
}
void CLQLSTMLayer::prepare()
diff --git a/src/runtime/CL/functions/CLRNNLayer.cpp b/src/runtime/CL/functions/CLRNNLayer.cpp
index 2a99ece388..967f4aa41b 100644
--- a/src/runtime/CL/functions/CLRNNLayer.cpp
+++ b/src/runtime/CL/functions/CLRNNLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,7 +28,6 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLCopyKernel.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
@@ -47,8 +46,8 @@ namespace arm_compute
using namespace arm_compute::misc::shape_calculator;
CLRNNLayer::CLRNNLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), _activation(), _fully_connected_kernel(), _copy_kernel(std::make_unique<CLCopyKernel>()), _fully_connected_out(),
- _gemm_output(), _add_output(), _is_prepared(false)
+ : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), _activation(), _fully_connected_kernel(), _copy(), _fully_connected_out(), _gemm_output(), _add_output(),
+ _is_prepared(false)
{
}
@@ -122,7 +121,7 @@ void CLRNNLayer::configure(const CLCompileContext &compile_context, const ICLTen
_activation.configure(compile_context, &_add_output, hidden_state, info);
_add_output.allocator()->allocate();
- _copy_kernel->configure(compile_context, hidden_state, output);
+ _copy.configure(compile_context, hidden_state, output);
}
void CLRNNLayer::run()
@@ -137,7 +136,7 @@ void CLRNNLayer::run()
_activation.run();
// copy hidden out to output
- CLScheduler::get().enqueue(*_copy_kernel);
+ _copy.run();
}
void CLRNNLayer::prepare()
diff --git a/src/runtime/CL/functions/CLReshapeLayer.cpp b/src/runtime/CL/functions/CLReshapeLayer.cpp
index 9abaa1b4e1..060eddb96c 100644
--- a/src/runtime/CL/functions/CLReshapeLayer.cpp
+++ b/src/runtime/CL/functions/CLReshapeLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,32 +23,21 @@
*/
#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "src/core/CL/kernels/CLReshapeLayerKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClReshape.h"
/** [CLReshapeLayer snippet] **/
namespace arm_compute
{
-namespace experimental
-{
-void CLReshape::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
-{
- auto k = std::make_unique<CLReshapeLayerKernel>();
- k->configure(compile_context, input, output);
- _kernel = std::move(k);
-}
-
-Status CLReshape::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
- return arm_compute::CLReshapeLayerKernel::validate(input, output);
-}
-} // namespace experimental
-
struct CLReshapeLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLReshape> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClReshape> op{ nullptr };
};
CLReshapeLayer::CLReshapeLayer()
@@ -69,14 +58,14 @@ void CLReshapeLayer::configure(const CLCompileContext &compile_context, const IC
{
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<experimental::CLReshape>();
+ _impl->op = std::make_unique<opencl::ClReshape>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLReshapeLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(experimental::CLReshape::validate(input, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(opencl::ClReshape::validate(input, output));
return Status{};
}
@@ -89,4 +78,4 @@ void CLReshapeLayer::run()
_impl->op->run(pack);
}
} // namespace arm_compute
 /** [CLReshapeLayer snippet] **/
diff --git a/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp b/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp
index 2db064af44..6180f4de07 100644
--- a/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp
+++ b/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,14 +29,13 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLMemsetKernel.h"
#include "src/core/CL/kernels/CLSpaceToBatchLayerKernel.h"
namespace arm_compute
{
CLSpaceToBatchLayer::CLSpaceToBatchLayer()
: _space_to_batch_kernel(std::make_unique<CLSpaceToBatchLayerKernel>()),
- _memset_kernel(std::make_unique<CLMemsetKernel>()),
+ _fill(),
_has_padding(false)
{
}
@@ -55,7 +54,7 @@ void CLSpaceToBatchLayer::configure(const CLCompileContext &compile_context, con
if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
{
_has_padding = true;
- _memset_kernel->configure(compile_context, output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
+ _fill.configure(compile_context, output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
}
_space_to_batch_kernel->configure(compile_context, input, block_shape, paddings, output);
}
@@ -73,14 +72,14 @@ void CLSpaceToBatchLayer::configure(const CLCompileContext &compile_context, con
if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
{
_has_padding = true;
- _memset_kernel->configure(compile_context, output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
+ _fill.configure(compile_context, output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
}
_space_to_batch_kernel->configure(compile_context, input, block_shape_x, block_shape_y, padding_left, padding_right, output);
}
Status CLSpaceToBatchLayer::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLMemsetKernel::validate(output, PixelValue(0, input->data_type(), input->quantization_info())));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFill::validate(output, PixelValue(0, input->data_type(), input->quantization_info())));
ARM_COMPUTE_RETURN_ON_ERROR(CLSpaceToBatchLayerKernel::validate(input, block_shape, paddings, output));
return Status{};
@@ -89,7 +88,7 @@ Status CLSpaceToBatchLayer::validate(const ITensorInfo *input, const ITensorInfo
Status CLSpaceToBatchLayer::validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLMemsetKernel::validate(output, PixelValue(0, input->data_type(), input->quantization_info())));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFill::validate(output, PixelValue(0, input->data_type(), input->quantization_info())));
ARM_COMPUTE_RETURN_ON_ERROR(CLSpaceToBatchLayerKernel::validate(input, block_shape_x, block_shape_y, padding_left, padding_right, output));
return Status{};
@@ -100,7 +99,7 @@ void CLSpaceToBatchLayer::run()
     // Zero out output only if we have paddings
     if(_has_padding)
     {
-        CLScheduler::get().enqueue(*_memset_kernel, true);
+        _fill.run();
     }
}
CLScheduler::get().enqueue(*_space_to_batch_kernel, true);
}