author     Manuel Bottini <manuel.bottini@arm.com>    2021-05-18 18:41:56 +0100
committer  Manuel Bottini <manuel.bottini@arm.com>    2021-06-15 16:33:52 +0000
commit     c6f4ec377027b21a67061efd21b65609079f98f9 (patch)
tree       d864f2092fff63790944fea7c8de5be46293bb43 /src/runtime
parent     94f799e8f6f605333d40472860fb472e8ba6d83d (diff)
download   ComputeLibrary-c6f4ec377027b21a67061efd21b65609079f98f9.tar.gz
Port CLWinogradConvolutionLayer with ClWinogradConv2d
Port CLWinogradInputTransformKernel
Port CLWinogradFilterTransformKernel
Port CLWinogradOutputTransformKernel

Resolves: COMPMID-4504

Change-Id: I3177dda0b9c2f56b36cb317027e94abe8d47229e
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5680
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp | 223
-rw-r--r--  src/runtime/CL/functions/CLWinogradInputTransform.cpp   |  50
-rw-r--r--  src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp       | 299
-rw-r--r--  src/runtime/gpu/cl/operators/ClWinogradConv2d.h         | 126
4 files changed, 467 insertions, 231 deletions
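
The port keeps the public function-level interface of CLWinogradConvolutionLayer unchanged and moves the implementation into the new operator-level ClWinogradConv2d. A minimal usage sketch of the unchanged function-level API (not part of this patch; shapes, names and the NHWC/F32 choice are illustrative):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init(); // create the default OpenCL context and queue

        // Illustrative NHWC F32 shapes: 64x64 input, 32 IFM, 16 OFM, 3x3 kernel, unit strides
        TensorInfo src_info(TensorShape(32U, 64U, 64U, 1U), 1, DataType::F32);
        TensorInfo wei_info(TensorShape(32U, 3U, 3U, 16U), 1, DataType::F32);
        TensorInfo bia_info(TensorShape(16U), 1, DataType::F32);
        TensorInfo dst_info(TensorShape(16U, 62U, 62U, 1U), 1, DataType::F32);
        src_info.set_data_layout(DataLayout::NHWC);
        wei_info.set_data_layout(DataLayout::NHWC);
        dst_info.set_data_layout(DataLayout::NHWC);

        CLTensor src, weights, biases, dst;
        src.allocator()->init(src_info);
        weights.allocator()->init(wei_info);
        biases.allocator()->init(bia_info);
        dst.allocator()->init(dst_info);

        CLWinogradConvolutionLayer winograd;
        winograd.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        winograd.run(); // the first run calls prepare(), which performs the one-off filter transform
        CLScheduler::get().sync();
        return 0;
    }
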
diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
index 6b8b00414a..f758c3d0b3 100644
--- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
@@ -23,79 +23,34 @@
*/
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
-#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"
+#include "support/Cast.h"
-using namespace arm_compute;
-
-namespace
+namespace arm_compute
{
-Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims, DataLayout data_layout)
+struct CLWinogradConvolutionLayer::Impl
{
- Size2D output_tile = Size2D{};
-
- const unsigned int kernel_max_dim = std::max(kernel_dims.width, kernel_dims.height);
-
- // Check if the input spatial dimensions are smaller than 4
- const bool is_input_lt4_nchw = (input_dims.width <= 4 && input_dims.height <= 4) && (data_layout == DataLayout::NCHW);
-
- if(kernel_max_dim == 3U)
- {
- if(kernel_dims == Size2D(3U, 3U))
- {
- output_tile = is_input_lt4_nchw ? Size2D(2U, 2U) : Size2D(4U, 4U);
- }
- else if(kernel_dims == Size2D(3U, 1U))
- {
- output_tile = is_input_lt4_nchw ? Size2D(2U, 1U) : Size2D(4U, 1U);
- }
- else
- {
- output_tile = is_input_lt4_nchw ? Size2D(1U, 2U) : Size2D(1U, 4U);
- }
- }
- else if(kernel_max_dim == 5U)
- {
- output_tile = Size2D(kernel_dims.width == 1 ? 1U : 4U,
- kernel_dims.height == 1 ? 1U : 4U);
- }
- else if(kernel_max_dim == 7U)
- {
- output_tile = Size2D(kernel_dims.width == 1 ? 1U : 2U,
- kernel_dims.height == 1 ? 1U : 2U);
- }
-
- return output_tile;
-}
-
-bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_size)
-{
- // Check if we want to configure a Winograd configuration which requires fast math
- using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- std::vector<WinogradConfiguration> fast_math_winograd =
- {
- WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5)),
- WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(7, 7))
- };
-
- auto p = std::make_pair(std::pair<int, int>(output_tile.width, output_tile.height),
- std::pair<int, int>(kernel_size.width, kernel_size.height));
-
- return std::find(fast_math_winograd.begin(), fast_math_winograd.end(), p) != fast_math_winograd.end();
-}
-} // namespace
+ const ICLTensor *src{ nullptr };
+ const ICLTensor *weights{ nullptr };
+ const ICLTensor *biases{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClWinogradConv2d> op{ nullptr };
+ ITensorPack run_pack{};
+ ITensorPack prep_pack{};
+ MemoryGroup memory_group{};
+ WorkspaceData<CLTensor> workspace_tensors{};
+ bool is_prepared{ false };
+};
CLWinogradConvolutionLayer::CLWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _batched_mm(memory_manager), _input_transform(), _filter_transform(std::make_unique<CLWinogradFilterTransformKernel>()),
- _output_transform(std::make_unique<CLWinogradOutputTransformKernel>()), _input0(), _input1(), _batched_mm_output(), _original_weights(nullptr), _is_prepared(false)
+ : _impl(std::make_unique<Impl>())
{
+ _impl->memory_group = MemoryGroup(memory_manager);
}
CLWinogradConvolutionLayer::~CLWinogradConvolutionLayer() = default;
@@ -110,139 +65,45 @@ void CLWinogradConvolutionLayer::configure(const CLCompileContext &compile_conte
const PadStrideInfo &conv_info,
const ActivationLayerInfo &act_info, bool enable_fast_math)
{
- // Get indices for the width and height
- const size_t idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+ _impl->src = input;
+ _impl->weights = weights;
+ _impl->biases = biases;
+ _impl->dst = output;
- // Input shape, kernel size and output tile
- const Size2D input_dims = Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
- const Size2D kernel_size = Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
- const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, input->info()->data_layout());
+ _impl->op = std::make_unique<opencl::ClWinogradConv2d>();
+ _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv_info, act_info, enable_fast_math);
- // Check if the Winograd configuration requires fast math
- if(!enable_fast_math)
+ _impl->run_pack =
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); //disable winograd for fp16 if fast math is false.
- ARM_COMPUTE_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
- }
- const WinogradInfo winograd_info = WinogradInfo(output_tile,
- kernel_size,
- input_dims,
- conv_info,
- input->info()->data_layout());
-
- _is_prepared = false;
- _original_weights = weights;
-
- // Manage intermediate tensors
- _memory_group.manage(&_input0);
- _memory_group.manage(&_batched_mm_output);
-
- // Do not manage _input1 as it contains the weights
-
- // Configure input transform
- _input_transform.configure(compile_context, input, &_input0, winograd_info);
-
- // Configure filter transform
- _filter_transform->configure(compile_context, weights, &_input1, winograd_info);
-
- // Configure batched matrix multiply
- _batched_mm.configure(compile_context, &_input0, &_input1, nullptr, &_batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
- GEMMLowpOutputStageInfo(),
- (input->info()->data_type() == DataType::F16)));
-
- // Configure output transform
- _output_transform->configure(compile_context, &_batched_mm_output, biases, output, winograd_info, act_info);
+ { TensorType::ACL_SRC_0, _impl->src },
+ { TensorType::ACL_SRC_1, _impl->weights },
+ { TensorType::ACL_SRC_2, _impl->biases },
+ { TensorType::ACL_DST, _impl->dst }
+ };
- // Allocate temporary tensors
- _input0.allocator()->allocate();
- _batched_mm_output.allocator()->allocate();
+ _impl->prep_pack = { { TensorType::ACL_SRC_1, _impl->weights } };
+ _impl->workspace_tensors = manage_workspace<CLTensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack, _impl->prep_pack);
}
Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
const ActivationLayerInfo &act_info, bool enable_fast_math)
{
- // Get indeces for the width and height
- const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
-
- // Input shape, kernel size and output tile
- const Size2D input_dims = Size2D(input->tensor_shape()[idx_width], input->tensor_shape()[idx_height]);
- const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
- const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, input->data_layout());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_left() > (kernel_size.x() / 2u)) || (conv_info.pad_right() > (kernel_size.x() / 2u))), "Winograd only supports padding up to half kernel size");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_top() > (kernel_size.y() / 2u)) || (conv_info.pad_bottom() > (kernel_size.y() / 2u))), "Winograd only supports padding up to half kernel size");
-
- // Check if the Winograd configuration requires fast math
- if(!enable_fast_math)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); //disable winograd for fp16 if fast math is false.
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
- }
-
- const WinogradInfo winograd_info = WinogradInfo(output_tile,
- kernel_size,
- input_dims,
- conv_info,
- input->data_layout());
-
- // Validate input transform
- const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
- const TensorInfo input0 = input->clone()->set_tensor_shape(input0_shape);
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransform::validate(input, &input0, winograd_info));
-
- // Validate filter transform
- const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
- const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1, winograd_info));
-
- // Validate batched matrix multiply
- TensorShape batched_mm_output_shape = input0.tensor_shape();
- batched_mm_output_shape[0] = input1.tensor_shape()[0];
- const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
- ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input0, &input1, nullptr, &batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
- GEMMLowpOutputStageInfo(), (input->data_type() == DataType::F16))));
-
- // Configure output transform
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradOutputTransformKernel::validate(&batched_mm_output, biases, output, winograd_info, act_info));
-
- return Status{};
+ return opencl::ClWinogradConv2d::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math);
}
void CLWinogradConvolutionLayer::run()
{
+ MemoryGroupResourceScope scope_mg(_impl->memory_group);
prepare();
-
- MemoryGroupResourceScope scope_mg(_memory_group);
-
- // Run input transform
- _input_transform.run();
-
- // Run batched matrix multiplication
- _batched_mm.run();
-
- // Run output transform
- CLScheduler::get().enqueue(*_output_transform);
+ _impl->op->run(_impl->run_pack);
}
void CLWinogradConvolutionLayer::prepare()
{
- if(!_is_prepared)
+ if(!_impl->is_prepared)
{
- // Run filter transform and mark original weights as unused
- _input1.allocator()->allocate();
- CLScheduler::get().enqueue(*_filter_transform, false);
- _original_weights->mark_as_unused();
-
- // Prepare GEMM and release reshaped weights if marked unused by CLGEMM
- _batched_mm.prepare();
- if(!_input1.is_used())
- {
- _input1.allocator()->free();
- }
-
- CLScheduler::get().queue().finish();
- _is_prepared = true;
+ _impl->op->prepare(_impl->prep_pack);
+ _impl->is_prepared = true;
}
}
+} // namespace arm_compute
\ No newline at end of file
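
The rewritten function now defers all shape and fast-math checks to ClWinogradConv2d::validate(). As the check_support_fast_math() helper (removed here and re-added in ClWinogradConv2d.cpp below) shows, F(4x4, 5x5) and F(2x2, 7x7), as well as F16 input, are rejected unless enable_fast_math is true. A hedged sketch (not from this patch; helper name and shapes are illustrative) probing this through the unchanged static validate():

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"

    using namespace arm_compute;

    // Returns an error status when F(4x4, 5x5) is requested without fast math;
    // passing true as the last argument makes the same configuration valid.
    Status check_5x5_without_fast_math()
    {
        // validate() only needs tensor metadata, so no OpenCL context is required here.
        TensorInfo src(TensorShape(32U, 64U, 64U, 1U), 1, DataType::F32);
        TensorInfo wei(TensorShape(32U, 5U, 5U, 16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(16U, 60U, 60U, 1U), 1, DataType::F32);
        src.set_data_layout(DataLayout::NHWC);
        wei.set_data_layout(DataLayout::NHWC);
        dst.set_data_layout(DataLayout::NHWC);

        return CLWinogradConvolutionLayer::validate(&src, &wei, nullptr, &dst,
                                                    PadStrideInfo(1, 1, 0, 0),
                                                    ActivationLayerInfo(),
                                                    false /* enable_fast_math */);
    }
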
diff --git a/src/runtime/CL/functions/CLWinogradInputTransform.cpp b/src/runtime/CL/functions/CLWinogradInputTransform.cpp
deleted file mode 100644
index 6d5a692bc3..0000000000
--- a/src/runtime/CL/functions/CLWinogradInputTransform.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"
-
-using namespace arm_compute;
-
-void CLWinogradInputTransform::configure(ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
-}
-
-void CLWinogradInputTransform::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- auto k = std::make_unique<CLWinogradInputTransformKernel>();
- k->configure(compile_context, input, output, winograd_info);
- _kernel = std::move(k);
- _border_handler->configure(compile_context, input, _kernel->border_size(), BorderMode::CONSTANT, PixelValue());
-}
-
-Status CLWinogradInputTransform::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransformKernel::validate(input, output, winograd_info));
- return Status{};
-}
diff --git a/src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp b/src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp
new file mode 100644
index 0000000000..c8db697778
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/gpu/cl/utils/ClAuxTensorHandler.h"
+#include "support/Cast.h"
+
+using namespace arm_compute::experimental;
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace
+{
+Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims, DataLayout data_layout)
+{
+ Size2D output_tile = Size2D{};
+
+ const unsigned int kernel_max_dim = std::max(kernel_dims.width, kernel_dims.height);
+
+ // Check if the input spatial dimensions are smaller than 4
+ const bool is_input_lt4_nchw = (input_dims.width <= 4 && input_dims.height <= 4) && (data_layout == DataLayout::NCHW);
+
+ if(kernel_max_dim == 3U)
+ {
+ if(kernel_dims == Size2D(3U, 3U))
+ {
+ output_tile = is_input_lt4_nchw ? Size2D(2U, 2U) : Size2D(4U, 4U);
+ }
+ else if(kernel_dims == Size2D(3U, 1U))
+ {
+ output_tile = is_input_lt4_nchw ? Size2D(2U, 1U) : Size2D(4U, 1U);
+ }
+ else
+ {
+ output_tile = is_input_lt4_nchw ? Size2D(1U, 2U) : Size2D(1U, 4U);
+ }
+ }
+ else if(kernel_max_dim == 5U)
+ {
+ output_tile = Size2D(kernel_dims.width == 1 ? 1U : 4U,
+ kernel_dims.height == 1 ? 1U : 4U);
+ }
+ else if(kernel_max_dim == 7U)
+ {
+ output_tile = Size2D(kernel_dims.width == 1 ? 1U : 2U,
+ kernel_dims.height == 1 ? 1U : 2U);
+ }
+
+ return output_tile;
+}
+
+bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_size)
+{
+ // Check if we want to configure a Winograd configuration which requires fast math
+ using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;
+
+ std::vector<WinogradConfiguration> fast_math_winograd =
+ {
+ WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5)),
+ WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(7, 7))
+ };
+
+ auto p = std::make_pair(std::pair<int, int>(output_tile.width, output_tile.height),
+ std::pair<int, int>(kernel_size.width, kernel_size.height));
+
+ return std::find(fast_math_winograd.begin(), fast_math_winograd.end(), p) != fast_math_winograd.end();
+}
+
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info, bool enable_fast_math)
+{
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D input_dims = Size2D(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height]);
+ const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
+ const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, src->data_layout());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_left() > (kernel_size.x() / 2u)) || (conv_info.pad_right() > (kernel_size.x() / 2u))), "Winograd only supports padding up to half kernel size");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_top() > (kernel_size.y() / 2u)) || (conv_info.pad_bottom() > (kernel_size.y() / 2u))), "Winograd only supports padding up to half kernel size");
+
+ // Check if the Winograd configuration requires fast math
+ if(!enable_fast_math)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32); //disable winograd for fp16 if fast math is false.
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
+ }
+
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
+ kernel_size,
+ input_dims,
+ conv_info,
+ src->data_layout());
+
+ // Validate input transform
+ const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*src, winograd_info);
+ const TensorInfo input0 = src->clone()->set_tensor_shape(input0_shape);
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClWinogradInputTransformKernel::validate(src, &input0, winograd_info));
+
+ // Validate filter transform
+ const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
+ const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClWinogradFilterTransformKernel::validate(weights, &input1, winograd_info));
+
+ // Validate batched matrix multiply
+ TensorShape batched_mm_output_shape = input0.tensor_shape();
+ batched_mm_output_shape[0] = input1.tensor_shape()[0];
+ const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClGemm::validate(&input0, &input1, nullptr, &batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
+ GEMMLowpOutputStageInfo(), (src->data_type() == DataType::F16))));
+
+ // Configure output transform
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClWinogradOutputTransformKernel::validate(&batched_mm_output, biases, dst, winograd_info, act_info));
+ return Status{};
+}
+
+} // namespace
+
+ClWinogradConv2d::ClWinogradConv2d()
+ : _batched_mm(),
+ _input_transform(std::make_unique<kernels::ClWinogradInputTransformKernel>()),
+ _filter_transform(std::make_unique<kernels::ClWinogradFilterTransformKernel>()),
+ _output_transform(std::make_unique<kernels::ClWinogradOutputTransformKernel>()),
+ _border_handler(),
+ _input0(),
+ _input1(),
+ _batched_mm_output(),
+ _is_prepared(false),
+ _aux_mem()
+{
+}
+
+ClWinogradConv2d::~ClWinogradConv2d() = default;
+
+void ClWinogradConv2d::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, bool enable_fast_math)
+{
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, biases, dst, conv_info, act_info, enable_fast_math));
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D input_dims = Size2D(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height]);
+ const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
+ const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, src->data_layout());
+
+ // Check if the Winograd configuration requires fast math
+ if(!enable_fast_math)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32); //disable winograd for fp16 if fast math is false.
+ ARM_COMPUTE_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
+ }
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
+ kernel_size,
+ input_dims,
+ conv_info,
+ src->data_layout());
+
+ _is_prepared = false;
+
+ // Configure input transform
+ _input_transform->configure(compile_context, src, &_input0, winograd_info);
+ _border_handler.configure(compile_context, src, _input_transform->border_size(), BorderMode::CONSTANT, PixelValue());
+
+ // Configure filter transform
+ _filter_transform->configure(compile_context, weights, &_input1, winograd_info);
+
+ // Configure batched matrix multiply
+ _batched_mm.configure(compile_context, &_input0, &_input1, nullptr, &_batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0,
+ false, false,
+ GEMMLowpOutputStageInfo(),
+ (src->data_type() == DataType::F16)));
+
+ // Configure output transform
+ _output_transform->configure(compile_context, &_batched_mm_output, biases, dst, winograd_info, act_info);
+
+ _aux_mem = _batched_mm.workspace();
+ _aux_mem.push_back(MemoryInfo(offset_int_vec(2), MemoryLifetime::Temporary, _input0.total_size()));
+ _aux_mem.push_back(MemoryInfo(offset_int_vec(3), MemoryLifetime::Persistent, _input1.total_size()));
+ _aux_mem.push_back(MemoryInfo(offset_int_vec(4), MemoryLifetime::Temporary, _batched_mm_output.total_size()));
+}
+
+Status ClWinogradConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info, bool enable_fast_math)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv_info, act_info, enable_fast_math));
+ return Status{};
+}
+
+void ClWinogradConv2d::run(ITensorPack &tensors)
+{
+ prepare(tensors);
+
+ // Run input transform
+ auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+ auto biases = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+ CLAuxTensorHandler input0(offset_int_vec(2), _input0, tensors, true);
+ CLAuxTensorHandler input1(offset_int_vec(3), _input1, tensors, true);
+ CLAuxTensorHandler batched_mm_output(offset_int_vec(4), _batched_mm_output, tensors, true);
+
+ ITensorPack pack_it
+ {
+ { TensorType::ACL_SRC, src },
+ { TensorType::ACL_DST, input0.get() },
+ };
+ CLScheduler::get().enqueue_op(_border_handler, pack_it);
+ CLScheduler::get().enqueue_op(*_input_transform, pack_it);
+
+ // Run batched matrix multiplication
+ ITensorPack pack_mm
+ {
+ { TensorType::ACL_SRC_0, input0.get() },
+ { TensorType::ACL_SRC_1, input1.get() },
+ { TensorType::ACL_DST, batched_mm_output.get() },
+ };
+ _batched_mm.run(pack_mm);
+
+ // Run output transform
+ ITensorPack pack_ot
+ {
+ { TensorType::ACL_SRC_0, batched_mm_output.get() },
+ { TensorType::ACL_SRC_1, biases },
+ { TensorType::ACL_DST, dst },
+ };
+ CLScheduler::get().enqueue_op(*_output_transform, pack_ot);
+}
+
+void ClWinogradConv2d::prepare(ITensorPack &tensors)
+{
+ if(!_is_prepared)
+ {
+ auto weights = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+ ICLTensor *in1_aux = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(offset_int_vec(3)));
+
+ CLAuxTensorHandler input1(_input1, *in1_aux);
+ ITensorPack pack_ft
+ {
+ { TensorType::ACL_SRC, weights },
+ { TensorType::ACL_DST, input1.get() },
+ };
+ // Run filter transform and mark original weights as unused
+ CLScheduler::get().enqueue_op(*_filter_transform, pack_ft, false);
+ weights->mark_as_unused();
+
+ tensors.add_tensor(ACL_SRC_1, input1.get());
+ // Prepare GEMM and release reshaped weights if marked unused by ClGemm
+ _batched_mm.prepare(tensors);
+
+ CLScheduler::get().queue().finish();
+ _is_prepared = true;
+ }
+}
+
+experimental::MemoryRequirements ClWinogradConv2d::workspace() const
+{
+ return _aux_mem;
+}
+} // namespace opencl
+} // namespace arm_compute
\ No newline at end of file
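
At the operator level the user tensors are no longer class members: they are passed per call through an ITensorPack, and the auxiliary buffers (transformed input, transformed weights, batched GEMM output) are described by workspace(). A hedged sketch of driving ClWinogradConv2d directly, mirroring what the CLWinogradConvolutionLayer wrapper above does (internal headers and the manage_workspace<CLTensor>() helper are used as in that wrapper; the function name is illustrative, and normal code should keep using the public runtime function):

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/MemoryGroup.h"
    #include "src/core/helpers/MemoryHelpers.h"                // manage_workspace (internal helper)
    #include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h" // internal header, not public API

    using namespace arm_compute;

    void winograd_via_operator(CLTensor &src, CLTensor &weights, CLTensor &biases, CLTensor &dst,
                               const PadStrideInfo &conv_info)
    {
        opencl::ClWinogradConv2d op;
        op.configure(CLKernelLibrary::get().get_compile_context(),
                     src.info(), weights.info(), biases.info(), dst.info(), conv_info);

        // The operator holds no user tensors; they are supplied through packs on every call.
        ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src },
                              { TensorType::ACL_SRC_1, &weights },
                              { TensorType::ACL_SRC_2, &biases },
                              { TensorType::ACL_DST, &dst } };
        ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weights } };

        // Auxiliary tensors are described by op.workspace() and injected into both packs,
        // exactly as CLWinogradConvolutionLayer does above; the returned workspace object
        // owns them and must outlive prepare()/run().
        MemoryGroup memory_group{};
        auto workspace = manage_workspace<CLTensor>(op.workspace(), memory_group, run_pack, prep_pack);

        MemoryGroupResourceScope scope_mg(memory_group);
        op.prepare(prep_pack); // one-off filter transform; marks the original weights as unused
        op.run(run_pack);
    }
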
diff --git a/src/runtime/gpu/cl/operators/ClWinogradConv2d.h b/src/runtime/gpu/cl/operators/ClWinogradConv2d.h
new file mode 100644
index 0000000000..83b31f1c99
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClWinogradConv2d.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_WINOGRADCONV2D_H
+#define ARM_COMPUTE_CL_WINOGRADCONV2D_H
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+#include "src/runtime/gpu/cl/operators/ClGemm.h"
+
+namespace arm_compute
+{
+class CLCompileContext;
+class ITensorInfo;
+namespace opencl
+{
+namespace kernels
+{
+class ClWinogradInputTransformKernel;
+class ClWinogradFilterTransformKernel;
+class ClWinogradOutputTransformKernel;
+} // namespace kernels
+/** Basic function to execute Winograd-based convolution on OpenCL. This function calls the following OpenCL functions/kernels:
+ *
+ * -# @ref kernels::ClWinogradInputTransformKernel
+ * -# @ref kernels::ClWinogradFilterTransformKernel (only once)
+ * -# @ref ClGemm
+ * -# @ref kernels::ClWinogradOutputTransformKernel
+ *
+ */
+class ClWinogradConv2d : public IClOperator
+{
+public:
+ /** Default constructor */
+ ClWinogradConv2d();
+ /** Default destructor */
+ ~ClWinogradConv2d();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ClWinogradConv2d(const ClWinogradConv2d &) = delete;
+ /** Default move constructor */
+ ClWinogradConv2d(ClWinogradConv2d &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ClWinogradConv2d &operator=(const ClWinogradConv2d &) = delete;
+ /** Default move assignment operator */
+ ClWinogradConv2d &operator=(ClWinogradConv2d &&) = default;
+ /** Set the input and output tensors.
+ *
+ * Valid data layouts:
+ * - NHWC
+ * - NCHW
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |src2 |dst |
+ * |:--------------|:--------------|:------|:--------------|
+ * |F16 |F16 |F16 |F16 |
+ * |F32 |F32 |F32 |F32 |
+ *
+ * @note This function only works with 3x3, 3x1, 1x3, 5x5, 5x1, 1x5, 7x1 and 1x7 kernels along with unit strides for both NCHW and NHWC data layouts
+ * @note Some Winograd configurations (e.g. F(4x4, 5x5)) are supported only with enable_fast_math = true
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor info. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represents a batch of inputs.
+ * Data types supported: F16/F32.
+ * @param[in] weights Weights tensor info. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor info. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p src.
+ * @param[out] dst Destination tensor info. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ * @param[in] enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
+ *                             available, which can reduce accuracy. Default is false
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClWinogradConv2d::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
+
+ // Inherited method overridden
+ void run(ITensorPack &tensors) override;
+ void prepare(ITensorPack &tensors) override;
+ experimental::MemoryRequirements workspace() const override;
+
+private:
+ ClGemm _batched_mm;
+ std::unique_ptr<kernels::ClWinogradInputTransformKernel> _input_transform;
+ std::unique_ptr<kernels::ClWinogradFilterTransformKernel> _filter_transform;
+ std::unique_ptr<kernels::ClWinogradOutputTransformKernel> _output_transform;
+ CLFillBorderKernel _border_handler;
+ TensorInfo _input0;
+ TensorInfo _input1;
+ TensorInfo _batched_mm_output;
+ bool _is_prepared;
+ experimental::MemoryRequirements _aux_mem{};
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_WINOGRADCONV2D_H */
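
configure() above records the three Winograd-specific buffers on top of ClGemm's own workspace; callers that manage memory themselves can inspect them through workspace(). A small sketch (assuming experimental::MemoryInfo exposes the slot, size and lifetime members used when the requirements are built in the .cpp above; the function name is illustrative):

    #include <cstdio>

    #include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h" // internal header, not public API

    using namespace arm_compute;

    // Print the auxiliary memory a configured ClWinogradConv2d asks for.
    void dump_workspace(const opencl::ClWinogradConv2d &op)
    {
        for(const auto &mem : op.workspace())
        {
            std::printf("slot %d: %zu bytes (%s)\n", mem.slot, static_cast<size_t>(mem.size),
                        mem.lifetime == experimental::MemoryLifetime::Persistent ? "persistent" : "temporary");
        }
    }
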