author    Manuel Bottini <manuel.bottini@arm.com>    2021-05-18 18:41:56 +0100
committer Manuel Bottini <manuel.bottini@arm.com>    2021-06-15 16:33:52 +0000
commit    c6f4ec377027b21a67061efd21b65609079f98f9 (patch)
tree      d864f2092fff63790944fea7c8de5be46293bb43
parent    94f799e8f6f605333d40472860fb472e8ba6d83d (diff)
download  ComputeLibrary-c6f4ec377027b21a67061efd21b65609079f98f9.tar.gz
Port CLWinogradConvolutionLayer with ClWinogradConv2d
Port CLWinogradInputTransformKernel
Port CLWinogradFilterTransformKernel
Port CLWinogradOutputTransformKernel

Resolves: COMPMID-4504
Change-Id: I3177dda0b9c2f56b36cb317027e94abe8d47229e
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5680
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  Android.bp                                                     |   8
-rw-r--r--  arm_compute/runtime/CL/CLFunctions.h                           |   1
-rw-r--r--  arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h  |  30
-rw-r--r--  arm_compute/runtime/CL/functions/CLWinogradInputTransform.h    | 111
-rw-r--r--  docs/user_guide/release_version_and_change_log.dox             |  10
-rw-r--r--  filelist.json                                                  |   8
-rw-r--r--  src/core/CL/CLKernels.h                                        |   3
-rw-r--r--  src/core/CL/kernels/CLWinogradFilterTransformKernel.h          | 115
-rw-r--r--  src/core/CL/kernels/CLWinogradInputTransformKernel.h           | 121
-rw-r--r--  src/core/CL/kernels/CLWinogradOutputTransformKernel.h          | 127
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp (renamed from src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp) |  62
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h      |  78
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp (renamed from src/core/CL/kernels/CLWinogradInputTransformKernel.cpp)   | 101
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h       |  88
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp (renamed from src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp) |  96
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h      |  87
-rw-r--r--  src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp        | 223
-rw-r--r--  src/runtime/CL/functions/CLWinogradInputTransform.cpp          |  50
-rw-r--r--  src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp              | 299
-rw-r--r--  src/runtime/gpu/cl/operators/ClWinogradConv2d.h                | 126
-rw-r--r--  tests/validation/CL/Winograd.cpp                               | 523
21 files changed, 869 insertions, 1398 deletions
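
Note on the overall change: the public Winograd kernels and the CLWinogradInputTransform function are folded into a single operator, opencl::ClWinogradConv2d, which follows the library's stateless operator/tensor-pack model. Below is a minimal caller-side sketch of that model, assuming the standard ICLOperator prepare()/run() entry points and the ACL_SRC_0/1/2 pack slots used by other ported operators; none of this is quoted from the patch itself.

    // Minimal sketch (assumptions noted above): driving the ported operator
    // through an ITensorPack instead of tensors cached at configure() time.
    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/experimental/Types.h"
    #include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"

    using namespace arm_compute;

    void run_winograd_once(opencl::ClWinogradConv2d &op,
                           ICLTensor *src, ICLTensor *wei, ICLTensor *bia, ICLTensor *dst)
    {
        // Operators keep no tensor pointers; every run receives them in a pack.
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC_0, src); // input activations
        pack.add_tensor(TensorType::ACL_SRC_1, wei); // filter weights
        pack.add_tensor(TensorType::ACL_SRC_2, bia); // optional biases
        pack.add_tensor(TensorType::ACL_DST, dst);
        op.prepare(pack); // one-off work, e.g. the filter transform
        op.run(pack);     // input transform + batched GEMM + output transform
    }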
diff --git a/Android.bp b/Android.bp
index bafbe8fe89..45bc1be21b 100644
--- a/Android.bp
+++ b/Android.bp
@@ -131,9 +131,6 @@ cc_library_static {
"src/core/CL/kernels/CLStridedSliceKernel.cpp",
"src/core/CL/kernels/CLTileKernel.cpp",
"src/core/CL/kernels/CLWeightsReshapeKernel.cpp",
- "src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp",
- "src/core/CL/kernels/CLWinogradInputTransformKernel.cpp",
- "src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp",
"src/core/CPP/CPPTypes.cpp",
"src/core/CPP/ICPPSimpleKernel.cpp",
"src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp",
@@ -376,6 +373,9 @@ cc_library_static {
"src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp",
"src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp",
"src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp",
+ "src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp",
+ "src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp",
+ "src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp",
"src/core/gpu/cl/kernels/gemm/ClGemmHelpers.cpp",
"src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp",
"src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.cpp",
@@ -507,7 +507,6 @@ cc_library_static {
"src/runtime/CL/functions/CLTranspose.cpp",
"src/runtime/CL/functions/CLUnstack.cpp",
"src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp",
- "src/runtime/CL/functions/CLWinogradInputTransform.cpp",
"src/runtime/CL/gemm/CLGEMMDefaultTypeBifrost.cpp",
"src/runtime/CL/gemm/CLGEMMDefaultTypeMidgard.cpp",
"src/runtime/CL/gemm/CLGEMMDefaultTypeValhall.cpp",
@@ -686,6 +685,7 @@ cc_library_static {
"src/runtime/gpu/cl/operators/ClSoftmax.cpp",
"src/runtime/gpu/cl/operators/ClSub.cpp",
"src/runtime/gpu/cl/operators/ClTranspose.cpp",
+ "src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp",
"utils/CommonGraphOptions.cpp",
"utils/GraphUtils.cpp",
"utils/Utils.cpp",
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 9d4b2fa050..62c94152e8 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -111,6 +111,5 @@
#include "arm_compute/runtime/CL/functions/CLTranspose.h"
#include "arm_compute/runtime/CL/functions/CLUnstack.h"
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
#endif /* ARM_COMPUTE_CLFUNCTIONS_H */
diff --git a/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h
index 7b42932f82..4b351267e3 100644
--- a/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h
@@ -25,31 +25,29 @@
#define ARM_COMPUTE_CLWINOGRADCONVOLUTIONLAYER_H
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/functions/CLGEMM.h"
-#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+
+#include <memory>
namespace arm_compute
{
class CLCompileContext;
-class CLWinogradFilterTransformKernel;
-class CLWinogradOutputTransformKernel;
class ICLTensor;
class ITensorInfo;
/** Basic function to execute Winograd-based convolution on OpenCL. This function calls the following OpenCL functions/kernels:
*
- * -# @ref CLWinogradInputTransform
- * -# @ref CLWinogradFilterTransformKernel (only once)
- * -# @ref CLGEMM
- * -# @ref CLWinogradOutputTransformKernel
+ * -# @ref opencl::ClWinogradConv2d
*
*/
class CLWinogradConvolutionLayer : public IFunction
{
public:
- /** Default constructor */
+ /** Default Constructor */
CLWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Default Destructor */
+ ~CLWinogradConvolutionLayer();
/** Prevent instances of this class from being copied (As this class contains pointers) */
CLWinogradConvolutionLayer(const CLWinogradConvolutionLayer &) = delete;
/** Default move constructor */
@@ -58,8 +56,6 @@ public:
CLWinogradConvolutionLayer &operator=(const CLWinogradConvolutionLayer &) = delete;
/** Default move assignment operator */
CLWinogradConvolutionLayer &operator=(CLWinogradConvolutionLayer &&) = default;
- /** Default destructor */
- ~CLWinogradConvolutionLayer();
/** Set the input and output tensors.
*
* Valid data layouts:
@@ -136,16 +132,8 @@ public:
void prepare() override;
private:
- MemoryGroup _memory_group;
- CLGEMM _batched_mm;
- CLWinogradInputTransform _input_transform;
- std::unique_ptr<CLWinogradFilterTransformKernel> _filter_transform;
- std::unique_ptr<CLWinogradOutputTransformKernel> _output_transform;
- CLTensor _input0;
- CLTensor _input1;
- CLTensor _batched_mm_output;
- const ICLTensor *_original_weights;
- bool _is_prepared;
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLWINOGRADCONVOLUTIONLAYER_H */
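
The function header loses all of its members (GEMM, transform kernels, intermediate tensors) in favour of an opaque Impl. A plausible shape of that struct inside CLWinogradConvolutionLayer.cpp, following the pimpl layout other ported CL functions use; every member name here is a guess, not a quote from the patch:

    // Hypothetical Impl layout (names assumed); the real definition lives in
    // src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp below.
    struct CLWinogradConvolutionLayer::Impl
    {
        const ICLTensor                          *src{ nullptr };
        const ICLTensor                          *weights{ nullptr };
        const ICLTensor                          *biases{ nullptr };
        ICLTensor                                *dst{ nullptr };
        std::unique_ptr<opencl::ClWinogradConv2d> op{ nullptr };     // does the real work
        ITensorPack                               run_pack{};        // rebuilt from the pointers above
        MemoryGroup                               memory_group{};    // intermediate tensor lifetime
        bool                                      is_prepared{ false };
    };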
diff --git a/arm_compute/runtime/CL/functions/CLWinogradInputTransform.h b/arm_compute/runtime/CL/functions/CLWinogradInputTransform.h
deleted file mode 100644
index d644591b57..0000000000
--- a/arm_compute/runtime/CL/functions/CLWinogradInputTransform.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWINOGRADINPUTTRANSFORM_H
-#define ARM_COMPUTE_CLWINOGRADINPUTTRANSFORM_H
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-
-#include <cstdint>
-
-namespace arm_compute
-{
-class CLCompileContext;
-class ICLTensor;
-class ITensorInfo;
-
-/** Basic function to execute a @ref CLWinogradInputTransformKernel. */
-class CLWinogradInputTransform : public ICLSimpleFunction
-{
-public:
- /** Set the input and output tensors.
- *
- * Valid data layouts:
- * - NHWC
- * - NCHW
- *
- * Valid data type configurations:
- * |src |dst |
- * |:--------------|:--------------|
- * |F16 |F16 |
- * |F32 |F32 |
- *
- * @note Winograd input transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input The input tensor to transform. Data types supported: F16,F32
- * @param[in] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- */
- void configure(ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Set the input and output tensors.
- *
- * @note Winograd input transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to transform. Data types supported: F16,F32
- * @param[in] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- */
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradInputTransform.
- *
- * @note Winograd input transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input The input tensor to transform. Data types supported: F16,F32
- * @param[in] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
-};
-}
-#endif /*ARM_COMPUTE_CLWINOGRADINPUTTRANSFORM_H */
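
The F(output tile, kernel size) notes in the deleted header reduce to simple tile bookkeeping. A worked example with illustrative numbers (224x224 input, 3x3 kernel, "same" padding, unit stride), showing where the 6x6 input tiles of F(4x4, 3x3) come from:

    // Worked example, illustrative numbers only: tile counts for F(4x4, 3x3).
    const int W = 224, H = 224; // spatial output dims ("same" padding, stride 1)
    const int m = 4;            // output tile edge in F(m x m, r x r)
    const int r = 3;            // kernel edge
    const int in_tile     = m + r - 1;       // 6 -> each input tile is 6x6 = 36 points
    const int num_tiles_x = (W + m - 1) / m; // 56
    const int num_tiles_y = (H + m - 1) / m; // 56, i.e. 3136 tiles in total
    // compute_winograd_input_transform_shape lays these out as one
    // (m + r - 1)^2-deep column per tile, forming an operand of the batched GEMM.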
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index 20995af693..3ffa11b045 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -259,13 +259,13 @@ v20.11 Public major release
- CLElementwiseOperationKernel
- @ref CLBatchNormalizationLayerKernel
- CLPoolingLayerKernel
- - @ref CLWinogradInputTransformKernel
+ - CLWinogradInputTransformKernel
- @ref CLGEMMLowpMatrixMultiplyNativeKernel
- @ref CLGEMMLowpMatrixAReductionKernel
- @ref CLGEMMLowpMatrixBReductionKernel
- @ref CLGEMMLowpOffsetContributionOutputStageKernel
- @ref CLGEMMLowpOffsetContributionKernel
- - @ref CLWinogradOutputTransformKernel
+ - CLWinogradOutputTransformKernel
- @ref CLGEMMLowpMatrixMultiplyReshapedKernel
- @ref CLFuseBatchNormalizationKernel
- @ref CLDepthwiseConvolutionLayerNativeKernel
@@ -273,7 +273,7 @@ v20.11 Public major release
- CLCopyKernel
- @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
- CLActivationLayerKernel
- - @ref CLWinogradFilterTransformKernel
+ - CLWinogradFilterTransformKernel
- CLWidthConcatenateLayerKernel
- CLWidthConcatenate4TensorsKernel
- CLWidthConcatenate2TensorsKernel
@@ -1102,8 +1102,8 @@ v18.05 Public major release
- @ref CLLSTMLayer
- @ref CLRNNLayer
- CLWidthConcatenateLayer / CLWidthConcatenateLayerKernel
- - @ref CLWinogradFilterTransformKernel / @ref CLWinogradInputTransformKernel / @ref CLWinogradConvolutionLayer
- - @ref CLWinogradInputTransformKernel / @ref CLWinogradInputTransform
+ - CLWinogradFilterTransformKernel / @ref CLWinogradConvolutionLayer
+ - CLWinogradInputTransformKernel / CLWinogradInputTransform
- New Arm® Neon™ kernels / functions:
- NEConvertFullyConnectedWeightsKernel / @ref NEConvertFullyConnectedWeights.
- Created the validate method in @ref CLDepthwiseConvolutionLayer.
diff --git a/filelist.json b/filelist.json
index d84a350a82..c8b4574c21 100644
--- a/filelist.json
+++ b/filelist.json
@@ -59,6 +59,9 @@
"src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp",
"src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp",
"src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp",
+ "src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp",
+ "src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp",
+ "src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp",
"src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp",
"src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp",
"src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp",
@@ -113,10 +116,7 @@
"src/core/CL/kernels/CLStackLayerKernel.cpp",
"src/core/CL/kernels/CLStridedSliceKernel.cpp",
"src/core/CL/kernels/CLTileKernel.cpp",
- "src/core/CL/kernels/CLWeightsReshapeKernel.cpp",
- "src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp",
- "src/core/CL/kernels/CLWinogradInputTransformKernel.cpp",
- "src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp"
+ "src/core/CL/kernels/CLWeightsReshapeKernel.cpp"
]
}
}
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index c59eebacbb..5dc95dae27 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -80,8 +80,5 @@
#include "src/core/CL/kernels/CLStridedSliceKernel.h"
#include "src/core/CL/kernels/CLTileKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
-#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
-#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"
-#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
#endif /* ARM_COMPUTE_CLKERNELS_H */
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.h b/src/core/CL/kernels/CLWinogradFilterTransformKernel.h
deleted file mode 100644
index d22fedebcd..0000000000
--- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H
-#define ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the Winograd filter transform kernel. */
-class CLWinogradFilterTransformKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLWinogradFilterTransformKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradFilterTransformKernel(const CLWinogradFilterTransformKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradFilterTransformKernel &operator=(const CLWinogradFilterTransformKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWinogradFilterTransformKernel(CLWinogradFilterTransformKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWinogradFilterTransformKernel &operator=(CLWinogradFilterTransformKernel &&) = default;
- /** Default destructor */
- ~CLWinogradFilterTransformKernel() = default;
- /** Set the input and output tensor.
- *
- * @note Winograd filter transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd filter transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- */
- void configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Set the input and output tensor.
- *
- * @note Winograd filter transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd filter transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradFilterTransformKernel
- *
- * @note Winograd filter transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd filter transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H */
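
For the filter transform the same arithmetic fixes the output volume, and it also explains why the layer runs this kernel only once, at prepare() time. Worked numbers, illustrative only:

    // Worked example, illustrative numbers: filter-transform growth for F(4x4, 3x3).
    const int m = 4, r = 3;
    const int coeffs_per_filter = (m + r - 1) * (m + r - 1);       // 6*6 = 36
    const int ifm = 64, ofm = 128;                                 // assumed channel counts
    const int transformed_values = ifm * ofm * coeffs_per_filter;  // 294912
    // These coefficients depend only on the constant weights, so they are
    // computed once and cached, matching the "(only once)" note in the old
    // CLWinogradConvolutionLayer documentation.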
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.h b/src/core/CL/kernels/CLWinogradInputTransformKernel.h
deleted file mode 100644
index 25301877e6..0000000000
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWINOGRADINPUTTRANSFORMKERNEL_H
-#define ARM_COMPUTE_CLWINOGRADINPUTTRANSFORMKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** OpenCL kernel to perform Winograd input transform.*/
-class CLWinogradInputTransformKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLWinogradInputTransformKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradInputTransformKernel(const CLWinogradInputTransformKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradInputTransformKernel &operator=(const CLWinogradInputTransformKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWinogradInputTransformKernel(CLWinogradInputTransformKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWinogradInputTransformKernel &operator=(CLWinogradInputTransformKernel &&) = default;
- /** Set the input and output of the kernel.
- *
- * @note Winograd input transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input The input tensor to transform. Data types supported: F16/F32
- * @param[in] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- */
- void configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Set the input and output of the kernel.
- *
- * @note Winograd input transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to transform. Data types supported: F16/F32
- * @param[in] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradInputTransformKernel
- *
- * @note Winograd input transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input The input tensor to transform. Data types supported: F16/F32
- * @param[in] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- BorderSize _border_size;
- const ICLTensor *_input;
- ICLTensor *_output;
- DataLayout _data_layout;
- int _num_tiles_x;
- int _num_tiles_y;
- unsigned int _step_z;
-};
-} // arm_compute
-#endif /*ARM_COMPUTE_CLWINOGRADINPUTTRANSFORMKERNEL_H */
diff --git a/src/core/CL/kernels/CLWinogradOutputTransformKernel.h b/src/core/CL/kernels/CLWinogradOutputTransformKernel.h
deleted file mode 100644
index 632a5629d9..0000000000
--- a/src/core/CL/kernels/CLWinogradOutputTransformKernel.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H
-#define ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the Winograd output transform kernel. */
-class CLWinogradOutputTransformKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLWinogradOutputTransformKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradOutputTransformKernel(const CLWinogradOutputTransformKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWinogradOutputTransformKernel &operator=(const CLWinogradOutputTransformKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWinogradOutputTransformKernel(CLWinogradOutputTransformKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWinogradOutputTransformKernel &operator=(CLWinogradOutputTransformKernel &&) = default;
- /** Default destructor */
- ~CLWinogradOutputTransformKernel() = default;
- /** Set the input and output tensor.
- *
- * @note Winograd output transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd output transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor with shape [C, N, K, batches]. Data types supported: F16/F32.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Set the input and output tensor.
- *
- * @note Winograd output transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd output transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Source tensor with shape [C, N, K, batches]. Data types supported: F16/F32.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info,
- const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradOutputTransformKernel
- *
- * @note Winograd output transform supports the following configurations for NCWH data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd output transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] input Source tensor with shape [C, N, K, batches]. Data types supported: F16/F32.
- * @param[in] bias Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
- * @param[out] output The output tensor. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p input
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation @ref ActivationLayerInfo. Only RELU, BOUNDED_RELU, LU_BOUNDED_RELU, LEAKY_RELU and SOFT_RELU supported.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- const ICLTensor *_input;
- const ICLTensor *_bias;
- ICLTensor *_output;
- bool _is_nhwc;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H */
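
The validate() documentation above restricts fusable activations to the ReLU family. A sketch of that check, assuming it sits inside the kernel's validate_arguments(); the helper name is hypothetical:

    // Sketch (helper name is hypothetical): the fused-activation whitelist
    // described in the deleted header's validate() documentation.
    bool is_fusable_activation(const ActivationLayerInfo &act_info)
    {
        if(!act_info.enabled())
        {
            return true; // no fusion requested
        }
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::RELU:
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                return true;
            default:
                return false;
        }
    }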
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp
index 138f4cf947..381b4bcae9 100644
--- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -36,13 +36,17 @@
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
+#include "support/Cast.h"
#include "support/StringSupport.h"
using namespace arm_compute::misc::shape_calculator;
namespace arm_compute
{
+namespace opencl
+{
+namespace kernels
+{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
@@ -87,69 +91,61 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
}
} // namespace
-CLWinogradFilterTransformKernel::CLWinogradFilterTransformKernel()
- : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
+void ClWinogradFilterTransformKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
-}
-
-void CLWinogradFilterTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
// Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input->info(), winograd_info)));
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*src, winograd_info)));
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
- auto padding_info = get_padding_info({ input, output });
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, winograd_info));
+ auto padding_info = get_padding_info({ src, dst });
// Set build options
CLBuildOptions build_opts;
- build_opts.add_option("-DSRC_DIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DSRC_DIM_Z=" + support::cpp11::to_string(src->dimension(2)));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL");
build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_FILTER_TRANSFORM_VERTICAL");
const Size2D kernel_size = winograd_info.kernel_size;
const Size2D output_tile_size = winograd_info.output_tile_size;
// Create kernel
- std::string kernel_name = "winograd_filter_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(input->info()->data_layout()));
+ std::string kernel_name = "winograd_filter_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(src->data_layout()));
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- _input = input;
- _output = output;
-
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info());
+ auto win_config = validate_and_configure_window(src, dst);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ IClKernel::configure_internal(win_config.second);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLWinogradFilterTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
+Status ClWinogradFilterTransformKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, winograd_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, winograd_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get()).first);
return Status{};
}
-void CLWinogradFilterTransformKernel::run(const Window &window, cl::CommandQueue &queue)
+void ClWinogradFilterTransformKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IClKernel::window(), window);
+
+ auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
// Setup output window
Window window_out;
- window_out.use_tensor_dimensions(_output->info()->tensor_shape(), 0);
+ window_out.use_tensor_dimensions(dst->info()->tensor_shape(), 0);
unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, window);
- add_3D_tensor_argument(idx, _output, window_out);
+ add_4D_tensor_argument(idx, src, window);
+ add_3D_tensor_argument(idx, dst, window_out);
enqueue(queue, *this, window, lws_hint());
}
+} // namespace kernels
+} // namespace opencl
} // namespace arm_compute
\ No newline at end of file
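
With the cached _input/_output members gone, the ported kernel is stateless across runs: whoever schedules it hands the tensors over per enqueue. A sketch of that dispatch path, assuming the CLScheduler::enqueue_op() entry point used for other ported kernels:

    // Sketch (scheduler entry point assumed): dispatching the now-stateless
    // filter transform with tensors supplied per run.
    #include "arm_compute/runtime/CL/CLScheduler.h"

    using namespace arm_compute;

    void schedule_filter_transform(opencl::kernels::ClWinogradFilterTransformKernel &kernel,
                                   const ICLTensor *weights, ICLTensor *transformed_weights)
    {
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC, weights);       // 4D filter tensor
        pack.add_tensor(TensorType::ACL_DST, transformed_weights); // 3D transformed output
        // false: batch with later work instead of flushing the queue now
        CLScheduler::get().enqueue_op(kernel, pack, false);
    }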
diff --git a/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h
new file mode 100644
index 0000000000..2bc2ceb36e
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H
+#define ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Interface for the Winograd filter transform kernel. */
+class ClWinogradFilterTransformKernel : public IClKernel
+{
+public:
+ /** Default constructor */
+ ClWinogradFilterTransformKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradFilterTransformKernel);
+ /** Set the input and output tensor.
+ *
+ * @note Winograd filter transform supports the following configurations for NCWH data layout
+ * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
+ * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
+ * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
+ *
+ * @note Winograd filter transform supports the following configurations for NHWC data layout
+ * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
+ * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
+ *
+ * Strides: only unit strides
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
+ * @param[out] dst The output tensor info. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p input
+ * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClWinogradFilterTransformKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H */
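
Because the new kernel configures against bare ITensorInfo, a caller can validate and even auto-initialise shapes before any CL buffer exists. A minimal sketch with illustrative shapes; the WinogradInfo argument order (output tile, kernel, input dims, conv info, layout) is assumed:

    // Minimal sketch (illustrative shapes, WinogradInfo argument order assumed):
    // validate-then-configure on ITensorInfo only, before any allocation.
    const WinogradInfo winograd_info(Size2D(4U, 4U), Size2D(3U, 3U),
                                     Size2D(224U, 224U),
                                     PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW);
    TensorInfo wei_info(TensorShape(3U, 3U, 64U, 128U), 1, DataType::F32);
    TensorInfo dst_info{}; // empty: configure() auto-initialises it

    opencl::kernels::ClWinogradFilterTransformKernel k;
    const Status st = opencl::kernels::ClWinogradFilterTransformKernel::validate(&wei_info, &dst_info, winograd_info);
    if(bool(st))
    {
        k.configure(CLKernelLibrary::get().get_compile_context(), &wei_info, &dst_info, winograd_info);
    }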
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp
index 3399f47d5f..17f0eb9e2c 100644
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -36,10 +36,15 @@
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "support/Cast.h"
#include "support/StringSupport.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
@@ -95,69 +100,62 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
}
} // namespace
-CLWinogradInputTransformKernel::CLWinogradInputTransformKernel()
- : _border_size(0), _input(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _num_tiles_x(0), _num_tiles_y(0), _step_z(1)
+ClWinogradInputTransformKernel::ClWinogradInputTransformKernel()
+ : _border_size(0), _data_layout(DataLayout::UNKNOWN), _num_tiles_x(0), _num_tiles_y(0), _step_z(1)
{
}
-BorderSize CLWinogradInputTransformKernel::border_size() const
+BorderSize ClWinogradInputTransformKernel::border_size() const
{
return _border_size;
}
-void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
-}
-
-void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
+void ClWinogradInputTransformKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, winograd_info));
- auto padding_info = get_padding_info({ input, output });
+ auto padding_info = get_padding_info({ src, dst });
const PadStrideInfo conv_info = winograd_info.convolution_info;
const Size2D output_tile_size = winograd_info.output_tile_size;
const Size2D kernel_size = winograd_info.kernel_size;
- _data_layout = input->info()->data_layout();
+ _data_layout = src->data_layout();
const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
// Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input->info()->dimension(idx_w), input->info()->dimension(idx_h)),
+ const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(src->dimension(idx_w), src->dimension(idx_h)),
kernel_size,
output_tile_size,
conv_info);
- _input = input;
- _output = output;
_num_tiles_x = num_tiles.width;
_num_tiles_y = num_tiles.height;
- const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input->info(), winograd_info);
+ const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*src, winograd_info);
// Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(output_shape));
- ARM_COMPUTE_ERROR_ON(_num_tiles_x * _num_tiles_y != static_cast<int>(output->info()->dimension(1)));
- const size_t total_batches = input->info()->tensor_shape().total_size_upper(3);
+ ARM_COMPUTE_ERROR_ON(_num_tiles_x * _num_tiles_y != static_cast<int>(dst->dimension(1)));
+ const size_t total_batches = src->tensor_shape().total_size_upper(3);
CLBuildOptions build_opts;
if(_data_layout == DataLayout::NHWC)
{
build_opts.add_option("-DNHWC");
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_w)));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(idx_h)));
+ build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(idx_w)));
+ build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_h)));
build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(_num_tiles_x));
build_opts.add_option("-DNUM_TILES_Y=" + support::cpp11::to_string(_num_tiles_y));
build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
}
@@ -168,10 +166,10 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
- build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(_input->info()->dimension(2)));
+ build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(src->dimension(2)));
}
// Create kernel
@@ -183,7 +181,7 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
// Check optimized kernel if output_dims == 2x2
if((tile_max_dim == 2) && (_data_layout == DataLayout::NCHW))
{
- _step_z = (_input->info()->dimension(2) % 2) != 0 ? 1 : 2;
+ _step_z = (src->dimension(2) % 2) != 0 ? 1 : 2;
}
// Append stepz and data layout
@@ -194,20 +192,20 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Create window and update padding
- auto win_config = validate_and_configure_window(input->info(), output->info(), winograd_info);
+ auto win_config = validate_and_configure_window(src, dst, winograd_info);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));
+ IClKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));
- _border_size = BorderSize(_input->info()->padding());
+ _border_size = BorderSize(src->padding());
- ARM_COMPUTE_ERROR_ON((input->info()->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));
+ ARM_COMPUTE_ERROR_ON((src->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));
_config_id = kernel_name;
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += support::cpp11::to_string(src->dimension(0));
_config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
+ _config_id += support::cpp11::to_string(src->dimension(1));
_config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(2));
+ _config_id += support::cpp11::to_string(src->dimension(2));
_config_id += "_";
_config_id += support::cpp11::to_string(conv_info.pad_left());
_config_id += "_";
@@ -216,27 +214,29 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
_config_id += lower_string(string_from_data_layout(_data_layout));
}
-Status CLWinogradInputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
+Status ClWinogradInputTransformKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, winograd_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), winograd_info).first);
-
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, winograd_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), winograd_info).first);
return Status{};
}
-void CLWinogradInputTransformKernel::run(const Window &window, cl::CommandQueue &queue)
+void ClWinogradInputTransformKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+ auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
const size_t idx_c = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
const size_t total_batches = window.shape().total_size_upper(3);
// Collapse window
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+ Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
if(_data_layout == DataLayout::NHWC)
{
@@ -245,8 +245,8 @@ void CLWinogradInputTransformKernel::run(const Window &window, cl::CommandQueue
slice.set(2, Window::Dimension(0, total_batches, 1));
unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, slice);
- add_4D_tensor_argument(idx, _output, slice);
+ add_4D_tensor_argument(idx, src, slice);
+ add_4D_tensor_argument(idx, dst, slice);
enqueue(queue, *this, slice, lws_hint());
}
else
@@ -259,17 +259,20 @@ void CLWinogradInputTransformKernel::run(const Window &window, cl::CommandQueue
slice.set(idx_c, Window::Dimension(slice[idx_c].start(), slice[idx_c].end(), _step_z));
unsigned int idx = 2 * num_arguments_per_3D_tensor();
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input->info()->strides_in_bytes()[3]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[3]));
+ _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src->info()->strides_in_bytes()[3]));
+ _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[3]));
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
+ add_3D_tensor_argument(idx, src, slice);
+ add_3D_tensor_argument(idx, dst, slice);
enqueue(queue, *this, slice, lws_hint());
}
while(window_collapsed.slide_window_slice_3D(slice));
}
}
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h
new file mode 100644
index 0000000000..76b45279a4
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H
+#define ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** OpenCL kernel to perform Winograd input transform.*/
+class ClWinogradInputTransformKernel : public IClKernel
+{
+public:
+ /** Default constructor */
+ ClWinogradInputTransformKernel();
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradInputTransformKernel);
+ /** Set the input and output of the kernel.
+ *
+ * @note Winograd input transform supports the following configurations for NCHW data layout
+ * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
+ * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
+ * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
+ *
+ * @note Winograd input transform supports the following configurations for NHWC data layout
+ * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
+ * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
+ *
+ * Strides: only unit strides
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src The input tensor info to transform. Data types supported: F16/F32
+ * @param[out] dst The output tensor info. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p src
+ * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClWinogradInputTransformKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+ BorderSize border_size() const override;
+
+private:
+ using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
+
+ BorderSize _border_size;
+ DataLayout _data_layout;
+ int _num_tiles_x;
+ int _num_tiles_y;
+ unsigned int _step_z;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H */
diff --git a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp
index 965bf9df77..a6c05420ed 100644
--- a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -38,15 +38,19 @@
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
+#include "support/Cast.h"
#include "support/StringSupport.h"
#include <cmath>
-namespace arm_compute
-{
using namespace arm_compute::misc::shape_calculator;
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
@@ -118,36 +122,23 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
}
} // namespace
-CLWinogradOutputTransformKernel::CLWinogradOutputTransformKernel()
- : _input(nullptr), _bias(nullptr), _output(nullptr), _is_nhwc(false)
-{
-}
-
-void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, winograd_info, act_info);
-}
-
-void CLWinogradOutputTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info,
+void ClWinogradOutputTransformKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const WinogradInfo &winograd_info,
const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
// Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input->info(), winograd_info)));
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*src, winograd_info)));
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), winograd_info, act_info));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, bias, dst, winograd_info, act_info));
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), winograd_info.output_tile_size);
+ auto win_config = validate_and_configure_window(src, bias, dst, winograd_info.output_tile_size);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ IClKernel::configure_internal(win_config.second);
- auto padding_info = get_padding_info({ input, bias, output });
+ auto padding_info = get_padding_info({ src, bias, dst });
- _input = input;
- _bias = bias;
- _output = output;
_is_nhwc = winograd_info.output_data_layout == DataLayout::NHWC;
// Compute num_tiles_x
@@ -163,7 +154,7 @@ void CLWinogradOutputTransformKernel::configure(const CLCompileContext &compile_
kernel_size,
output_tile_size,
conv_info);
- const size_t total_batches = output->info()->tensor_shape().total_size_upper(3);
+ const size_t total_batches = dst->tensor_shape().total_size_upper(3);
// Set build options
CLBuildOptions build_opts;
@@ -180,17 +171,17 @@ void CLWinogradOutputTransformKernel::configure(const CLCompileContext &compile_
build_opts.add_option("-DVEC_SIZE=4");
}
- build_opts.add_option_if(_bias != nullptr, std::string("-DHAS_BIAS"));
+ build_opts.add_option_if(bias != nullptr, std::string("-DHAS_BIAS"));
build_opts.add_option("-cl-fast-relaxed-math");
build_opts.add_option("-DN0=" + support::cpp11::to_string(win_config.second.x().step()));
build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(num_tiles.width));
build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(1)));
- build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(idx_width)));
- build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(idx_height)));
- build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(_input->info()->dimension(2)));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
+ build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(1)));
+ build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst->dimension(idx_width)));
+ build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(idx_height)));
+ build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(src->dimension(2)));
build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL");
build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL");
@@ -201,36 +192,39 @@ void CLWinogradOutputTransformKernel::configure(const CLCompileContext &compile_
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
_config_id += "_";
- _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+ _config_id += lower_string(string_from_data_type(src->data_type()));
_config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += support::cpp11::to_string(src->dimension(0));
_config_id += "_";
- _config_id += support::cpp11::to_string(input->info()->dimension(1));
+ _config_id += support::cpp11::to_string(src->dimension(1));
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += support::cpp11::to_string(dst->dimension(0));
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
+ _config_id += support::cpp11::to_string(dst->dimension(1));
_config_id += "_";
_config_id += lower_string(string_from_data_layout(winograd_info.output_data_layout));
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info) && _is_nhwc);
}
-Status CLWinogradOutputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+Status ClWinogradOutputTransformKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, (bias != nullptr ? bias->clone().get() : nullptr), output, winograd_info, act_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (bias != nullptr ? bias->clone().get() : nullptr), output->clone().get(), winograd_info.output_tile_size).first);
-
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, (bias != nullptr ? bias->clone().get() : nullptr), dst, winograd_info, act_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), (bias != nullptr ? bias->clone().get() : nullptr), dst->clone().get(), winograd_info.output_tile_size).first);
return Status{};
}
-void CLWinogradOutputTransformKernel::run(const Window &window, cl::CommandQueue &queue)
+void ClWinogradOutputTransformKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IClKernel::window(), window);
+
+ auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+ auto bias = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
// Collapse window
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+ Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
// Get initial windows
Window slice = window_collapsed.first_slice_window_4D();
@@ -241,27 +235,29 @@ void CLWinogradOutputTransformKernel::run(const Window &window, cl::CommandQueue
slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
- if(_bias != nullptr)
+ if(bias != nullptr)
{
unsigned int idx1 = 2 * num_arguments_per_4D_tensor();
Window slice_biases;
- slice_biases.use_tensor_dimensions(_bias->info()->tensor_shape());
- add_1D_tensor_argument(idx1, _bias, slice_biases);
+ slice_biases.use_tensor_dimensions(bias->info()->tensor_shape());
+ add_1D_tensor_argument(idx1, bias, slice_biases);
}
if(_is_nhwc)
{
- unsigned int idx2 = 2 * num_arguments_per_4D_tensor() + ((_bias != nullptr) ? num_arguments_per_1D_tensor() : 0);
- _kernel.setArg(idx2, static_cast<int>(_output->info()->total_size() - _output->info()->strides_in_bytes().y()));
+ unsigned int idx2 = 2 * num_arguments_per_4D_tensor() + ((bias != nullptr) ? num_arguments_per_1D_tensor() : 0);
+ _kernel.setArg(idx2, static_cast<int>(dst->info()->total_size() - dst->info()->strides_in_bytes().y()));
}
do
{
unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, slice);
- add_4D_tensor_argument(idx, _output, slice_out);
+ add_4D_tensor_argument(idx, src, slice);
+ add_4D_tensor_argument(idx, dst, slice_out);
enqueue(queue, *this, slice, lws_hint());
}
while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_out));
}
+} // namespace kernels
+} // namespace opencl
} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h
new file mode 100644
index 0000000000..48b27e658c
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H
+#define ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Interface for the Winograd output transform kernel. */
+class ClWinogradOutputTransformKernel : public IClKernel
+{
+public:
+ /** Default constructor */
+ ClWinogradOutputTransformKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradOutputTransformKernel);
+ /** Set the input and output tensor.
+ *
+ * @note Winograd output transform supports the following configurations for NCHW data layout
+ * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
+ * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
+ * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
+ *
+ * @note Winograd output transform supports the following configurations for NHWC data layout
+ * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
+ * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
+ *
+ * Strides: only unit strides
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor info with shape [C, N, K, batches]. Data types supported: F16/F32.
+ * @param[in] bias Biases tensor info. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: Same as @p src
+ * @param[out] dst The output tensor info. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p src
+ * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const WinogradInfo &winograd_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClWinogradOutputTransformKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
+
+ bool _is_nhwc{ false };
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H */
diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
index 6b8b00414a..f758c3d0b3 100644
--- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
@@ -23,79 +23,34 @@
*/
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
-#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"
+#include "support/Cast.h"
-using namespace arm_compute;
-
-namespace
+namespace arm_compute
{
-Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims, DataLayout data_layout)
+struct CLWinogradConvolutionLayer::Impl
{
- Size2D output_tile = Size2D{};
-
- const unsigned int kernel_max_dim = std::max(kernel_dims.width, kernel_dims.height);
-
- // Check if the input spatial dimensions are smaller than 4
- const bool is_input_lt4_nchw = (input_dims.width <= 4 && input_dims.height <= 4) && (data_layout == DataLayout::NCHW);
-
- if(kernel_max_dim == 3U)
- {
- if(kernel_dims == Size2D(3U, 3U))
- {
- output_tile = is_input_lt4_nchw ? Size2D(2U, 2U) : Size2D(4U, 4U);
- }
- else if(kernel_dims == Size2D(3U, 1U))
- {
- output_tile = is_input_lt4_nchw ? Size2D(2U, 1U) : Size2D(4U, 1U);
- }
- else
- {
- output_tile = is_input_lt4_nchw ? Size2D(1U, 2U) : Size2D(1U, 4U);
- }
- }
- else if(kernel_max_dim == 5U)
- {
- output_tile = Size2D(kernel_dims.width == 1 ? 1U : 4U,
- kernel_dims.height == 1 ? 1U : 4U);
- }
- else if(kernel_max_dim == 7U)
- {
- output_tile = Size2D(kernel_dims.width == 1 ? 1U : 2U,
- kernel_dims.height == 1 ? 1U : 2U);
- }
-
- return output_tile;
-}
-
-bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_size)
-{
- // Check if we want to configure a Winograd configuration which requires fast math
- using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- std::vector<WinogradConfiguration> fast_math_winograd =
- {
- WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5)),
- WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(7, 7))
- };
-
- auto p = std::make_pair(std::pair<int, int>(output_tile.width, output_tile.height),
- std::pair<int, int>(kernel_size.width, kernel_size.height));
-
- return std::find(fast_math_winograd.begin(), fast_math_winograd.end(), p) != fast_math_winograd.end();
-}
-} // namespace
+ const ICLTensor *src{ nullptr };
+ const ICLTensor *weights{ nullptr };
+ const ICLTensor *biases{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClWinogradConv2d> op{ nullptr };
+ ITensorPack run_pack{};
+ ITensorPack prep_pack{};
+ MemoryGroup memory_group{};
+ WorkspaceData<CLTensor> workspace_tensors{};
+ bool is_prepared{ false };
+};
CLWinogradConvolutionLayer::CLWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _batched_mm(memory_manager), _input_transform(), _filter_transform(std::make_unique<CLWinogradFilterTransformKernel>()),
- _output_transform(std::make_unique<CLWinogradOutputTransformKernel>()), _input0(), _input1(), _batched_mm_output(), _original_weights(nullptr), _is_prepared(false)
+ : _impl(std::make_unique<Impl>())
{
+ _impl->memory_group = MemoryGroup(memory_manager);
}
CLWinogradConvolutionLayer::~CLWinogradConvolutionLayer() = default;
@@ -110,139 +65,45 @@ void CLWinogradConvolutionLayer::configure(const CLCompileContext &compile_conte
const PadStrideInfo &conv_info,
const ActivationLayerInfo &act_info, bool enable_fast_math)
{
- // Get indices for the width and height
- const size_t idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+ _impl->src = input;
+ _impl->weights = weights;
+ _impl->biases = biases;
+ _impl->dst = output;
- // Input shape, kernel size and output tile
- const Size2D input_dims = Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
- const Size2D kernel_size = Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
- const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, input->info()->data_layout());
+ _impl->op = std::make_unique<opencl::ClWinogradConv2d>();
+ _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv_info, act_info, enable_fast_math);
- // Check if the Winograd configuration requires fast math
- if(!enable_fast_math)
+ _impl->run_pack =
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); //disable winograd for fp16 if fast math is false.
- ARM_COMPUTE_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
- }
- const WinogradInfo winograd_info = WinogradInfo(output_tile,
- kernel_size,
- input_dims,
- conv_info,
- input->info()->data_layout());
-
- _is_prepared = false;
- _original_weights = weights;
-
- // Manage intermediate tensors
- _memory_group.manage(&_input0);
- _memory_group.manage(&_batched_mm_output);
-
- // Do not manage _input1 as it contains the weights
-
- // Configure input transform
- _input_transform.configure(compile_context, input, &_input0, winograd_info);
-
- // Configure filter transform
- _filter_transform->configure(compile_context, weights, &_input1, winograd_info);
-
- // Configure batched matrix multiply
- _batched_mm.configure(compile_context, &_input0, &_input1, nullptr, &_batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
- GEMMLowpOutputStageInfo(),
- (input->info()->data_type() == DataType::F16)));
-
- // Configure output transform
- _output_transform->configure(compile_context, &_batched_mm_output, biases, output, winograd_info, act_info);
+ { TensorType::ACL_SRC_0, _impl->src },
+ { TensorType::ACL_SRC_1, _impl->weights },
+ { TensorType::ACL_SRC_2, _impl->biases },
+ { TensorType::ACL_DST, _impl->dst }
+ };
- // Allocate temporary tensors
- _input0.allocator()->allocate();
- _batched_mm_output.allocator()->allocate();
+ _impl->prep_pack = { { TensorType::ACL_SRC_1, _impl->weights } };
+ _impl->workspace_tensors = manage_workspace<CLTensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack, _impl->prep_pack);
}
Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
const ActivationLayerInfo &act_info, bool enable_fast_math)
{
- // Get indeces for the width and height
- const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
-
- // Input shape, kernel size and output tile
- const Size2D input_dims = Size2D(input->tensor_shape()[idx_width], input->tensor_shape()[idx_height]);
- const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
- const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, input->data_layout());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_left() > (kernel_size.x() / 2u)) || (conv_info.pad_right() > (kernel_size.x() / 2u))), "Winograd only supports padding up to half kernel size");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_top() > (kernel_size.y() / 2u)) || (conv_info.pad_bottom() > (kernel_size.y() / 2u))), "Winograd only supports padding up to half kernel size");
-
- // Check if the Winograd configuration requires fast math
- if(!enable_fast_math)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); //disable winograd for fp16 if fast math is false.
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
- }
-
- const WinogradInfo winograd_info = WinogradInfo(output_tile,
- kernel_size,
- input_dims,
- conv_info,
- input->data_layout());
-
- // Validate input transform
- const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
- const TensorInfo input0 = input->clone()->set_tensor_shape(input0_shape);
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransform::validate(input, &input0, winograd_info));
-
- // Validate filter transform
- const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
- const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1, winograd_info));
-
- // Validate batched matrix multiply
- TensorShape batched_mm_output_shape = input0.tensor_shape();
- batched_mm_output_shape[0] = input1.tensor_shape()[0];
- const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
- ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input0, &input1, nullptr, &batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
- GEMMLowpOutputStageInfo(), (input->data_type() == DataType::F16))));
-
- // Configure output transform
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradOutputTransformKernel::validate(&batched_mm_output, biases, output, winograd_info, act_info));
-
- return Status{};
+ return opencl::ClWinogradConv2d::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math);
}
void CLWinogradConvolutionLayer::run()
{
+ MemoryGroupResourceScope scope_mg(_impl->memory_group);
prepare();
-
- MemoryGroupResourceScope scope_mg(_memory_group);
-
- // Run input transform
- _input_transform.run();
-
- // Run batched matrix multiplication
- _batched_mm.run();
-
- // Run output transform
- CLScheduler::get().enqueue(*_output_transform);
+ _impl->op->run(_impl->run_pack);
}
void CLWinogradConvolutionLayer::prepare()
{
- if(!_is_prepared)
+ if(!_impl->is_prepared)
{
- // Run filter transform and mark original weights as unused
- _input1.allocator()->allocate();
- CLScheduler::get().enqueue(*_filter_transform, false);
- _original_weights->mark_as_unused();
-
- // Prepare GEMM and release reshaped weights if marked unused by CLGEMM
- _batched_mm.prepare();
- if(!_input1.is_used())
- {
- _input1.allocator()->free();
- }
-
- CLScheduler::get().queue().finish();
- _is_prepared = true;
+ _impl->op->prepare(_impl->prep_pack);
+ _impl->is_prepared = true;
}
}
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/runtime/CL/functions/CLWinogradInputTransform.cpp b/src/runtime/CL/functions/CLWinogradInputTransform.cpp
deleted file mode 100644
index 6d5a692bc3..0000000000
--- a/src/runtime/CL/functions/CLWinogradInputTransform.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"
-
-using namespace arm_compute;
-
-void CLWinogradInputTransform::configure(ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
-}
-
-void CLWinogradInputTransform::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
-{
- auto k = std::make_unique<CLWinogradInputTransformKernel>();
- k->configure(compile_context, input, output, winograd_info);
- _kernel = std::move(k);
- _border_handler->configure(compile_context, input, _kernel->border_size(), BorderMode::CONSTANT, PixelValue());
-}
-
-Status CLWinogradInputTransform::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransformKernel::validate(input, output, winograd_info));
- return Status{};
-}
diff --git a/src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp b/src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp
new file mode 100644
index 0000000000..c8db697778
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClWinogradConv2d.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h"
+#include "src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/gpu/cl/utils/ClAuxTensorHandler.h"
+#include "support/Cast.h"
+
+using namespace arm_compute::experimental;
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace
+{
+Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims, DataLayout data_layout)
+{
+ Size2D output_tile = Size2D{};
+
+ const unsigned int kernel_max_dim = std::max(kernel_dims.width, kernel_dims.height);
+
+ // Check if the input spatial dimensions are smaller than 4
+ const bool is_input_lt4_nchw = (input_dims.width <= 4 && input_dims.height <= 4) && (data_layout == DataLayout::NCHW);
+
+ if(kernel_max_dim == 3U)
+ {
+ if(kernel_dims == Size2D(3U, 3U))
+ {
+ output_tile = is_input_lt4_nchw ? Size2D(2U, 2U) : Size2D(4U, 4U);
+ }
+ else if(kernel_dims == Size2D(3U, 1U))
+ {
+ output_tile = is_input_lt4_nchw ? Size2D(2U, 1U) : Size2D(4U, 1U);
+ }
+ else
+ {
+ output_tile = is_input_lt4_nchw ? Size2D(1U, 2U) : Size2D(1U, 4U);
+ }
+ }
+ else if(kernel_max_dim == 5U)
+ {
+ output_tile = Size2D(kernel_dims.width == 1 ? 1U : 4U,
+ kernel_dims.height == 1 ? 1U : 4U);
+ }
+ else if(kernel_max_dim == 7U)
+ {
+ output_tile = Size2D(kernel_dims.width == 1 ? 1U : 2U,
+ kernel_dims.height == 1 ? 1U : 2U);
+ }
+
+ return output_tile;
+}
+
+bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_size)
+{
+ // Check if we want to configure a Winograd configuration which requires fast math
+ using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;
+
+ std::vector<WinogradConfiguration> fast_math_winograd =
+ {
+ WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5)),
+ WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(7, 7))
+ };
+
+ auto p = std::make_pair(std::pair<int, int>(output_tile.width, output_tile.height),
+ std::pair<int, int>(kernel_size.width, kernel_size.height));
+
+ return std::find(fast_math_winograd.begin(), fast_math_winograd.end(), p) != fast_math_winograd.end();
+}
+
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info, bool enable_fast_math)
+{
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D input_dims = Size2D(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height]);
+ const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
+ const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, src->data_layout());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_left() > (kernel_size.x() / 2u)) || (conv_info.pad_right() > (kernel_size.x() / 2u))), "Winograd only supports padding up to half kernel size");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_top() > (kernel_size.y() / 2u)) || (conv_info.pad_bottom() > (kernel_size.y() / 2u))), "Winograd only supports padding up to half kernel size");
+
+ // Check if the Winograd configuration requires fast math
+ if(!enable_fast_math)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32); // Disable Winograd for FP16 if fast math is false.
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
+ }
+
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
+ kernel_size,
+ input_dims,
+ conv_info,
+ src->data_layout());
+
+ // Validate input transform
+ const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*src, winograd_info);
+ const TensorInfo input0 = src->clone()->set_tensor_shape(input0_shape);
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClWinogradInputTransformKernel::validate(src, &input0, winograd_info));
+
+ // Validate filter transform
+ const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
+ const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClWinogradFilterTransformKernel::validate(weights, &input1, winograd_info));
+
+ // Validate batched matrix multiply
+ TensorShape batched_mm_output_shape = input0.tensor_shape();
+ batched_mm_output_shape[0] = input1.tensor_shape()[0];
+ const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
+ ARM_COMPUTE_RETURN_ON_ERROR(ClGemm::validate(&input0, &input1, nullptr, &batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
+ GEMMLowpOutputStageInfo(), (src->data_type() == DataType::F16))));
+
+ // Configure output transform
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClWinogradOutputTransformKernel::validate(&batched_mm_output, biases, dst, winograd_info, act_info));
+ return Status{};
+}
+
+} // namespace
+
+ClWinogradConv2d::ClWinogradConv2d()
+ : _batched_mm(),
+ _input_transform(std::make_unique<kernels::ClWinogradInputTransformKernel>()),
+ _filter_transform(std::make_unique<kernels::ClWinogradFilterTransformKernel>()),
+ _output_transform(std::make_unique<kernels::ClWinogradOutputTransformKernel>()),
+ _border_handler(),
+ _input0(),
+ _input1(),
+ _batched_mm_output(),
+ _is_prepared(false),
+ _aux_mem()
+{
+}
+
+ClWinogradConv2d::~ClWinogradConv2d() = default;
+
+void ClWinogradConv2d::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, bool enable_fast_math)
+{
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, biases, dst, conv_info, act_info, enable_fast_math));
+ // Get indices for the width and height
+ const size_t idx_width = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_height = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT);
+
+ // Input shape, kernel size and output tile
+ const Size2D input_dims = Size2D(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height]);
+ const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
+ const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, src->data_layout());
+
+ // Check if the Winograd configuration requires fast math
+ if(!enable_fast_math)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32); // Disable Winograd for FP16 if fast math is false.
+ ARM_COMPUTE_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
+ }
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
+ kernel_size,
+ input_dims,
+ conv_info,
+ src->data_layout());
+
+ _is_prepared = false;
+
+ // Configure input transform
+ _input_transform->configure(compile_context, src, &_input0, winograd_info);
+ _border_handler.configure(compile_context, src, _input_transform->border_size(), BorderMode::CONSTANT, PixelValue());
+
+ // Configure filter transform
+ _filter_transform->configure(compile_context, weights, &_input1, winograd_info);
+
+ // Configure batched matrix multiply
+ _batched_mm.configure(compile_context, &_input0, &_input1, nullptr, &_batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0,
+ false, false,
+ GEMMLowpOutputStageInfo(),
+ (src->data_type() == DataType::F16)));
+
+ // Configure output transform
+ _output_transform->configure(compile_context, &_batched_mm_output, biases, dst, winograd_info, act_info);
+
+ _aux_mem = _batched_mm.workspace();
+ _aux_mem.push_back(MemoryInfo(offset_int_vec(2), MemoryLifetime::Temporary, _input0.total_size()));
+ _aux_mem.push_back(MemoryInfo(offset_int_vec(3), MemoryLifetime::Persistent, _input1.total_size()));
+ _aux_mem.push_back(MemoryInfo(offset_int_vec(4), MemoryLifetime::Temporary, _batched_mm_output.total_size()));
+}
+
+Status ClWinogradConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info, bool enable_fast_math)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv_info, act_info, enable_fast_math));
+ return Status{};
+}
+
+void ClWinogradConv2d::run(ITensorPack &tensors)
+{
+ prepare(tensors);
+
+ // Run input transform
+ auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+ auto biases = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+ CLAuxTensorHandler input0(offset_int_vec(2), _input0, tensors, true);
+ CLAuxTensorHandler input1(offset_int_vec(3), _input1, tensors, true);
+ CLAuxTensorHandler batched_mm_output(offset_int_vec(4), _batched_mm_output, tensors, true);
+
+ ITensorPack pack_it
+ {
+ { TensorType::ACL_SRC, src },
+ { TensorType::ACL_DST, input0.get() },
+ };
+ CLScheduler::get().enqueue_op(_border_handler, pack_it);
+ CLScheduler::get().enqueue_op(*_input_transform, pack_it);
+
+ // Run batched matrix multiplication
+ ITensorPack pack_mm
+ {
+ { TensorType::ACL_SRC_0, input0.get() },
+ { TensorType::ACL_SRC_1, input1.get() },
+ { TensorType::ACL_DST, batched_mm_output.get() },
+ };
+ _batched_mm.run(pack_mm);
+
+ // Run output transform
+ ITensorPack pack_ot
+ {
+ { TensorType::ACL_SRC_0, batched_mm_output.get() },
+ { TensorType::ACL_SRC_1, biases },
+ { TensorType::ACL_DST, dst },
+ };
+ CLScheduler::get().enqueue_op(*_output_transform, pack_ot);
+}
+
+void ClWinogradConv2d::prepare(ITensorPack &tensors)
+{
+ if(!_is_prepared)
+ {
+ auto weights = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+ ICLTensor *in1_aux = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(offset_int_vec(3)));
+
+ CLAuxTensorHandler input1(_input1, *in1_aux);
+ ITensorPack pack_ft
+ {
+ { TensorType::ACL_SRC, weights },
+ { TensorType::ACL_DST, input1.get() },
+ };
+ // Run filter transform and mark original weights as unused
+ CLScheduler::get().enqueue_op(*_filter_transform, pack_ft, false);
+ weights->mark_as_unused();
+
+ tensors.add_tensor(ACL_SRC_1, input1.get());
+ // Prepare GEMM and release reshaped weights if marked unused by ClGemm
+ _batched_mm.prepare(tensors);
+
+ CLScheduler::get().queue().finish();
+ _is_prepared = true;
+ }
+}
+
+experimental::MemoryRequirements ClWinogradConv2d::workspace() const
+{
+ return _aux_mem;
+}
+} // namespace opencl
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/runtime/gpu/cl/operators/ClWinogradConv2d.h b/src/runtime/gpu/cl/operators/ClWinogradConv2d.h
new file mode 100644
index 0000000000..83b31f1c99
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClWinogradConv2d.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_WINOGRADCONV2D_H
+#define ARM_COMPUTE_CL_WINOGRADCONV2D_H
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+#include "src/runtime/gpu/cl/operators/ClGemm.h"
+
+namespace arm_compute
+{
+class CLCompileContext;
+class ITensorInfo;
+namespace opencl
+{
+namespace kernels
+{
+class ClWinogradInputTransformKernel;
+class ClWinogradFilterTransformKernel;
+class ClWinogradOutputTransformKernel;
+} // namespace kernels
+/** Basic function to execute Winograd-based convolution on OpenCL. This function calls the following OpenCL functions/kernels:
+ *
+ * -# @ref kernels::ClWinogradInputTransformKernel
+ * -# @ref kernels::ClWinogradFilterTransformKernel (only once)
+ * -# @ref ClGemm
+ * -# @ref kernels::ClWinogradOutputTransformKernel
+ *
+ */
+class ClWinogradConv2d : public IClOperator
+{
+public:
+ /** Default constructor */
+ ClWinogradConv2d();
+ /** Default destructor */
+ ~ClWinogradConv2d();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ClWinogradConv2d(const ClWinogradConv2d &) = delete;
+ /** Default move constructor */
+ ClWinogradConv2d(ClWinogradConv2d &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ClWinogradConv2d &operator=(const ClWinogradConv2d &) = delete;
+ /** Default move assignment operator */
+ ClWinogradConv2d &operator=(ClWinogradConv2d &&) = default;
+ /** Set the input and output tensors.
+ *
+ * Valid data layouts:
+ * - NHWC
+ * - NCHW
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |src2 |dst |
+ * |:--------------|:--------------|:------|:--------------|
+ * |F16 |F16 |F16 |F16 |
+ * |F32 |F32 |F32 |F32 |
+ *
+ * @note This function only works with 3x3, 3x1, 1x3, 5x5, 5x1, 1x5, 7x1 and 1x7 kernels along with unit strides for both NCHW and NHWC data layouts
+ * @note Some Winograd configurations (i.e. F(4x4, 5x5)) are supported only with enable_fast_math = true
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor info. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F16/F32.
+ * @param[in] weights Weights tensor info. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor info. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p src
+ * @param[out] dst Destination tensor info. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
+ * available which may introduce a drop of accuracy as well. Default is false
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClWinogradConv2d::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
+
+ // Inherited method overridden
+ void run(ITensorPack &tensors) override;
+ void prepare(ITensorPack &tensors) override;
+ experimental::MemoryRequirements workspace() const override;
+
+private:
+ ClGemm _batched_mm;
+ std::unique_ptr<kernels::ClWinogradInputTransformKernel> _input_transform;
+ std::unique_ptr<kernels::ClWinogradFilterTransformKernel> _filter_transform;
+ std::unique_ptr<kernels::ClWinogradOutputTransformKernel> _output_transform;
+ CLFillBorderKernel _border_handler;
+ TensorInfo _input0;
+ TensorInfo _input1;
+ TensorInfo _batched_mm_output;
+ bool _is_prepared;
+ experimental::MemoryRequirements _aux_mem{};
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_WINOGRADCONV2D_H */
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 7ccc850be5..9952427762 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -27,9 +27,6 @@
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
-#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
-#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -64,109 +61,6 @@ constexpr float abs_tolerance_f32_nightly = 0.003f; /**< Abs
constexpr float abs_tolerance_convolution_layer_f16 = 2.5f; /**< Tolerance number */
constexpr float tolerance_num_f16 = 0.15f; /**< Tolerance number */
-// Input transform
-const auto SmallWinogradInputTransformDatasetNCHW =
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset2x2_3x3(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset2x1_3x1(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset1x2_1x3(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_3x3(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x1_3x1(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset1x4_1x3(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_5x5(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x1_5x1(),
- datasets::SmallWinogradInputTransformDataset1x4_1x5()))))))));
-
-const auto SmallWinogradInputTransformDatasetNHWC = framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_3x3(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x1_3x1(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset1x4_1x3(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_5x5(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x1_5x1(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset1x4_1x5(),
- framework::dataset::concat(datasets::SmallWinogradInputTransformDataset2x1_7x1(),
- datasets::SmallWinogradInputTransformDataset1x2_1x7())))))));
-
-const auto SmallWinogradInputTransformDatasetNHWC_FP32 = framework::dataset::concat(SmallWinogradInputTransformDatasetNHWC,
- datasets::SmallWinogradInputTransformDataset2x2_7x7());
-
-const auto LargeWinogradInputTransformDatasetNCHW =
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset2x2_3x3(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset2x1_3x1(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset1x2_1x3(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_3x3(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x1_3x1(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset1x4_1x3(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_5x5(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x1_5x1(),
- datasets::LargeWinogradInputTransformDataset1x4_1x5()))))))));
-
-const auto LargeWinogradInputTransformDatasetNHWC =
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_3x3(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_5x5(),
- framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x1_5x1(),
- datasets::LargeWinogradInputTransformDataset1x4_1x5())));
-
-const auto LargeWinogradInputTransformDatasetNHWC_FP32 =
- framework::dataset::concat(LargeWinogradInputTransformDatasetNHWC,
- (datasets::LargeWinogradInputTransformDataset2x2_7x7()));
-
-// Filter transform
-const auto SmallWinogradFilterTransformDatasetNCHW =
- framework::dataset::concat(combine(datasets::Small3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Small3x1Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 1U), Size2D(4U, 1U) })),
- framework::dataset::concat(combine(datasets::Small1x3Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 2U), Size2D(1U, 4U) })),
- framework::dataset::concat(combine(datasets::Small5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Small5x1Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 1U) })),
- combine(datasets::Small1x5Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 4U) })))))));
-
-const auto SmallWinogradFilterTransformDatasetNHWC_F16 =
- framework::dataset::concat(combine(datasets::Small3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Small3x1Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 1U) })),
- framework::dataset::concat(combine(datasets::Small1x3Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 4U) })),
- framework::dataset::concat(combine(datasets::Small5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Small5x1Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 1U) })),
- framework::dataset::concat(combine(datasets::Small1x5Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 4U) })),
- framework::dataset::concat(combine(datasets::Small1x7Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 2U) })),
- combine(datasets::Small7x1Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 1U) })))))))));
-
-const auto SmallWinogradFilterTransformDatasetNHWC_F32 =
- framework::dataset::concat(SmallWinogradFilterTransformDatasetNHWC_F16,
- combine(datasets::Small7x7Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 2U) })));
-
-const auto LargeWinogradFilterTransformDatasetNCHW =
- framework::dataset::concat(combine(datasets::Large3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Large3x1Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 1U), Size2D(4U, 1U) })),
- framework::dataset::concat(combine(datasets::Large1x3Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 2U), Size2D(1U, 4U) })),
- framework::dataset::concat(combine(datasets::Large5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Large5x1Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 1U) })),
- combine(datasets::Large1x5Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 4U) })))))));
-
-const auto LargeWinogradFilterTransformDatasetNHWC_F16 =
- framework::dataset::concat(combine(datasets::Large3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Large3x1Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 1U) })),
- framework::dataset::concat(combine(datasets::Large1x3Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 4U) })),
- framework::dataset::concat(combine(datasets::Large5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
- framework::dataset::concat(combine(datasets::Large5x1Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 1U) })),
- framework::dataset::concat(combine(datasets::Large1x5Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 4U) })),
- framework::dataset::concat(combine(datasets::Large7x1Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 1U) })),
- combine(datasets::Large1x7Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 2U) })))))))));
-
-const auto LargeWinogradFilterTransformDatasetNHWC_F32 =
- framework::dataset::concat(LargeWinogradFilterTransformDatasetNHWC_F16,
- combine(datasets::Large7x7Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 2U) })));
-
-// Output transform
-const auto SmallWinogradOutputTransformDatasetNCHW = datasets::SmallWinogradOutputTransformDatasetNCHW();
-
-const auto SmallWinogradOutputTransformDatasetNHWC_F16 = datasets::SmallWinogradOutputTransformDatasetNHWC_F16();
-
-const auto SmallWinogradOutputTransformDatasetNHWC_F32 = datasets::SmallWinogradOutputTransformDatasetNHWC_F32();
-
-const auto LargeWinogradOutputTransformDatasetNCHW = datasets::LargeWinogradOutputTransformDatasetNCHW();
-
-const auto LargeWinogradOutputTransformDatasetNHWC_F16 = datasets::LargeWinogradOutputTransformDatasetNHWC_F16();
-
-const auto LargeWinogradOutputTransformDatasetNHWC_F32 = datasets::LargeWinogradOutputTransformDatasetNHWC_F32();
-
//Activation Functions
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
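Each dataset block deleted in the hunk above is built from the same two test-framework primitives: right-nested framework::dataset::concat() to union shape datasets of the same element type, and combine() to form a cartesian product with a named parameter axis. A reduced example of the pattern, using names from the deleted code:

// Union two small-shape datasets, then cross with an "OutputTile" axis --
// the same composition the deleted blocks use at larger scale.
const auto tiles  = framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) });
const auto shapes = framework::dataset::concat(datasets::Small3x3Shapes(), datasets::Small3x1Shapes());
const auto cases  = combine(shapes, tiles);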
@@ -191,421 +85,6 @@ using namespace arm_compute::misc::shape_calculator;
TEST_SUITE(CL)
TEST_SUITE(Winograd)
-TEST_SUITE(InputTransform)
-
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
- framework::dataset::make("InputInfo",{
- TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16), // F16 not supported
- TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported
- TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F32), // Kernel size not supported
- TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F32), // Strides not supported
- TensorInfo(TensorShape(53U, 33U, 4U), 1, DataType::F32), // Padding needed
- TensorInfo(TensorShape(34U, 42U, 7U, 3U), 1, DataType::F32), // Padding needed
- TensorInfo(TensorShape(31U, 37U, 37U), 1, DataType::F32) // Padding needed
- }),
- framework::dataset::make("OutputInfo", {
- TensorInfo(TensorShape(5U, 5U, 16U, 3U), 1, DataType::F16),
- TensorInfo(TensorShape(5U, 5U, 16U, 3U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(5U, 5U, 16U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(5U, 1U, 16U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(4U, 442U, 16U), 1, DataType::F32),
- TensorInfo(TensorShape(7U, 320U, 16U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(37U, 304U, 16U), 1, DataType::F32)
- })),
- framework::dataset::make("WinogradInfo", {
- WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 0), DataLayout::NCHW),
- WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
- WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(2, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 33U), PadStrideInfo(1, 1, 0, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(34U, 42U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
- WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(31U, 37U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW)
- })),
- framework::dataset::make("Expected", { false, false, false, false, false, false, false })),
- input_info, output_info, winograd_info, expected)
-{
- ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
-}
-
-using CLWinogradInputTransformFixtureFP32 = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float>;
-using CLWinogradInputTransformMixedDataLayoutFixtureFP32 = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float, true>;
-using CLWinogradInputTransformFixtureFP16 = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, half>;
-
-TEST_SUITE(NCHW)
-TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine(SmallWinogradInputTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradInputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine(
- datasets::SmallWinogradInputTransformDataset2x2_3x3(),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-TEST_SUITE_END() // FP32
-
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP16, framework::DatasetMode::PRECOMMIT, combine(combine(SmallWinogradInputTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP16, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f16);
-}
-TEST_SUITE_END() // FP16
-TEST_SUITE_END() // NCHW
-
-TEST_SUITE(NHWC)
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP16, framework::DatasetMode::PRECOMMIT, combine(combine(SmallWinogradInputTransformDatasetNHWC,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP16, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNHWC,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
-TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine(SmallWinogradInputTransformDatasetNHWC_FP32,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradInputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine(
- datasets::SmallWinogradInputTransformDataset4x4_3x3(),
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNHWC_FP32,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32_nightly);
-}
-TEST_SUITE_END() // FP32
-TEST_SUITE_END() // NHWC
-TEST_SUITE_END() // InputTransform
-
-TEST_SUITE(FilterTransform)
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
- framework::dataset::make("InputInfo",{
- TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F16), // F16 supported
- TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported
- TensorInfo(TensorShape(5U, 5U, 5U, 3U), 1, DataType::F32), // Kernel size not supported
- TensorInfo(TensorShape(3U, 3U), 1, DataType::F32), // Output tile not supported
- TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F32), // valid
- TensorInfo(TensorShape(3U, 3U, 37U, 2U), 1, DataType::F32), // valid
- TensorInfo(TensorShape(3U, 3U, 37U, 22U), 1, DataType::F32) // valid
- }),
- framework::dataset::make("OutputInfo", {
- TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F16),
- TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F32),
- TensorInfo(TensorShape(1U, 1U, 16U), 1, DataType::F32),
- TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F32),
- TensorInfo(TensorShape(2U, 37U, 16U), 1, DataType::F32),
- TensorInfo(TensorShape(22U, 37U, 36U), 1, DataType::F32)
- })),
- framework::dataset::make("WinogradInfo", {
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
- WinogradInfo(Size2D(3U, 3U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
- WinogradInfo(Size2D(4U, 4U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ )
- })),
- framework::dataset::make("Expected", { true, false, false, false, true, true, true })),
- input_info, output_info, winograd_info, expected)
-{
- ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
-}
-
-using CLWinogradFilterTransform = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
-using CLWinogradFilterTransformFixtureFP32 = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
-using CLWinogradFilterTransformMixedDataLayoutFixtureFP32 = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float, true>;
-using CLWinogradFilterTransformFixtureFP16 = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, half>;
-
-TEST_SUITE(NCHW)
-TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::PRECOMMIT,
- combine(combine(SmallWinogradFilterTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradFilterTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(datasets::Small3x3Shapes(),
- framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradFilterTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-TEST_SUITE_END() // FP32
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP16, framework::DatasetMode::PRECOMMIT,
- combine(combine(SmallWinogradFilterTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP16, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradFilterTransformDatasetNCHW,
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16);
-}
-TEST_SUITE_END() // FP16
-TEST_SUITE_END() // NCHW
-
-TEST_SUITE(NHWC)
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP16, framework::DatasetMode::PRECOMMIT,
- combine(combine(SmallWinogradFilterTransformDatasetNHWC_F16,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP16, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradFilterTransformDatasetNHWC_F16,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F16 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
-TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::PRECOMMIT,
- combine(combine(SmallWinogradFilterTransformDatasetNHWC_F32,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradFilterTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(datasets::Small3x3Shapes(),
- framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradFilterTransformDatasetNHWC_F32,
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
- framework::dataset::make("DataType", { DataType::F32 })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-TEST_SUITE_END() // FP32
-TEST_SUITE_END() // NHWC
-TEST_SUITE_END() // FilterTransform
-
-TEST_SUITE(OutputTransform)
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
- framework::dataset::make("InputInfo",{
- TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F16), // F16 supported
- TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::QASYMM8), // QASYMM8 not supported
- TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32), // Kernel size not supported
- TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32), // Valid
- TensorInfo(TensorShape(13U, 108U, 16U, 4U), 1, DataType::F32), // Padding needed
- TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32), // Valid
- TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32), // Wrong WinogradInfo
- TensorInfo(TensorShape(7U, 256U, 36U, 3U), 1, DataType::F32), // Valid
- TensorInfo(TensorShape(7U, 256U, 16U, 3U), 1, DataType::F32) // Wrong number of batches
- }),
- framework::dataset::make("BiasInfo", {
- TensorInfo(TensorShape(512U), 1, DataType::F16),
- TensorInfo(TensorShape(512U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(512U), 1, DataType::F32),
- TensorInfo(TensorShape(512U), 1, DataType::F32),
- TensorInfo(TensorShape(13U), 1, DataType::F32),
- TensorInfo(TensorShape(7U), 1, DataType::F32),
- TensorInfo(TensorShape(7U), 1, DataType::F32),
- TensorInfo(TensorShape(7U), 1, DataType::F32),
- TensorInfo(TensorShape(7U), 1, DataType::F32)
- })),
- framework::dataset::make("OutputInfo", {
- TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F16),
- TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(17U, 23U, 13U, 4U), 1, DataType::F32),
- TensorInfo(TensorShape(8U, 10U, 7U, 7U), 1, DataType::F32),
- TensorInfo(TensorShape(7U, 9U, 7U, 7U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 64U, 7U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 64U, 7U, 3U), 1, DataType::F32)
- })),
- framework::dataset::make("WinogradInfo", {
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2U, 2U), Size2D(5U, 5U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(17U, 23U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(2U, 3U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
- WinogradInfo(Size2D(4U, 4U), Size2D(3U, 3U), Size2D(64U, 64U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
- WinogradInfo(Size2D(4U, 4U), Size2D(3U, 3U), Size2D(64U, 64U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW)
- })),
- framework::dataset::make("Expected", { true, false, false, true, false, true, false, true, false })),
- input_info, bias_info, output_info, winograd_info, expected)
-{
- ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
-}
-
-using CLWinogradOutputTransform = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradOutputTransformKernel, 0>;
-using CLWinogradOutputTransformFixtureFP32 = WinogradOutputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradOutputTransform, float>;
-using CLWinogradOutputTransformMixedDataLayoutFixtureFP32 = WinogradOutputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradOutputTransform, float, true>;
-using CLWinogradOutputTransformFixtureFP16 = WinogradOutputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradOutputTransform, half>;
-
-TEST_SUITE(NCHW)
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP16, framework::DatasetMode::ALL,
- combine(combine(SmallWinogradOutputTransformDatasetNCHW,
- framework::dataset::make("DataType", { DataType::F16 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP16, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradOutputTransformDatasetNCHW,
- framework::dataset::make("DataType", { DataType::F16 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16);
-}
-TEST_SUITE_END() // FP16
-TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::ALL,
- combine(combine(SmallWinogradOutputTransformDatasetNCHW,
- framework::dataset::make("DataType", { DataType::F32 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradOutputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::ALL,
- combine(combine(combine(framework::dataset::make("Input", TensorShape(13U, 6U, 16U)),
- framework::dataset::make("WinogradInfo", WinogradInfo(Size2D(2U, 2U),Size2D(3U, 3U), Size2D(7U, 6U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW))),
- framework::dataset::make("DataType", { DataType::F32 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradOutputTransformDatasetNCHW,
- framework::dataset::make("DataType", { DataType::F32 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-TEST_SUITE_END() // FP32
-TEST_SUITE_END() // NCHW
-
-TEST_SUITE(NHWC)
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP16, framework::DatasetMode::ALL,
- combine(combine(SmallWinogradOutputTransformDatasetNHWC_F16,
- framework::dataset::make("DataType", { DataType::F16 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP16, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradOutputTransformDatasetNHWC_F16,
- framework::dataset::make("DataType", { DataType::F16 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
-TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::ALL,
- combine(combine(SmallWinogradOutputTransformDatasetNHWC_F32,
- framework::dataset::make("DataType", { DataType::F32 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradOutputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::ALL,
- combine(combine(combine(framework::dataset::make("Input", TensorShape(1U, 4U, 64U)),
- framework::dataset::make("WinogradInfo", WinogradInfo(Size2D(2U, 2U), Size2D(7U, 7U), Size2D(9U, 9U), PadStrideInfo(1, 1, 0, 0), DataLayout::NHWC))),
- framework::dataset::make("DataType", { DataType::F32 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
- combine(combine(LargeWinogradOutputTransformDatasetNHWC_F32,
- framework::dataset::make("DataType", { DataType::F32 })),
- framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) ))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
-TEST_SUITE_END() // FP32
-TEST_SUITE_END() // NHWC
-TEST_SUITE_END() // OutputTransform
-
TEST_SUITE(ConvolutionLayer)
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", {
@@ -970,9 +449,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, fr
validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_convolution_layer_f16);
}
TEST_SUITE_END() // Conv1x7
-
TEST_SUITE_END() // FP16
-
TEST_SUITE_END() // ConvolutionLayer
TEST_SUITE_END() // Winograd
TEST_SUITE_END() // CL
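
For reference, the remaining ConvolutionLayer suites still go through the public CLWinogradConvolutionLayer entry point. A minimal end-to-end sketch of that interface follows, with illustrative shapes and padding that are not taken from this patch:

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // set up the CL context and queue

    // Illustrative shapes: 56x56x64 input, 3x3 kernels, 64 output channels.
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(56U, 56U, 64U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 64U, 64U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(56U, 56U, 64U), 1, DataType::F32));

    CLWinogradConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();
    // ... map the tensors and fill src/weights/biases here ...

    conv.run();                // transforms + batched GEMM under the hood
    CLScheduler::get().sync(); // wait for the command queue to drain
    return 0;
}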