 arm_compute/core/Types.h                                  |   8
 arm_compute/runtime/CL/CLFunctions.h                      |   1
 arm_compute/runtime/CL/functions/CLConvolutionLayer.h     | 132
 arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h | 153
 src/runtime/CL/functions/CLConvolutionLayer.cpp           | 332
 src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp       | 353
 tests/validation/CL/ConvolutionLayer.cpp                  | 134
 utils/TypePrinter.h                                       |  66
 8 files changed, 766 insertions(+), 413 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 417369cd9b..24c73ca7c1 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1058,5 +1058,13 @@ struct IOFormatInfo
std::string row_delim;
bool align_columns;
};
+
+/** Available ConvolutionMethod */
+enum class ConvolutionMethod
+{
+ GEMM, /**< Convolution using GEMM */
+ DIRECT, /**< Direct convolution */
+ WINOGRAD /**< Convolution using Winograd */
+};
}
#endif /* __ARM_COMPUTE_TYPES_H__ */
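
As an aside, because the enum lives in core Types.h it is usable outside the CL runtime too. A minimal sketch of branching on it (hypothetical caller-side code, not part of this patch; the patch itself adds an operator<< for the same purpose in utils/TypePrinter.h further down):

    #include "arm_compute/core/Types.h"

    // Hypothetical helper: map the selected method to a label for logging.
    inline const char *method_label(arm_compute::ConvolutionMethod m)
    {
        switch(m)
        {
            case arm_compute::ConvolutionMethod::GEMM:     return "GEMM";
            case arm_compute::ConvolutionMethod::DIRECT:   return "DIRECT";
            case arm_compute::ConvolutionMethod::WINOGRAD: return "WINOGRAD";
            default:                                       return "UNKNOWN";
        }
    }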
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 630b9535d9..a5bbc41a17 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -60,6 +60,7 @@
#include "arm_compute/runtime/CL/functions/CLFloor.h"
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index f6672cef1d..53d59c3176 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -26,71 +26,18 @@
#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
-#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
-#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
-#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLMemoryGroup.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLGEMM.h"
-#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
-#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
+#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include <memory>
namespace arm_compute
{
-class ICLTensor;
-
-/** Function to reshape and transpose the weights. This function calls the following kernels:
- * -# @ref CLWeightsReshapeKernel
- * -# @ref CLGEMMTranspose1xWKernel
- */
-class CLConvolutionLayerReshapeWeights : public IFunction
-{
-public:
- /** Constructor */
- CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
- /** Set the input and output tensors.
- *
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported: QS8/QASYMM8/QS16/F16/F32.
- * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
- * @param[out] output Destination tensor. Data types supported: Same as @p weights.
- * @param[in] transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
- * Data types supported: Same as @p weights.
- */
- void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW);
- // Inherited methods overridden:
- void run() override;
-
-private:
- CLMemoryGroup _memory_group;
- CLWeightsReshapeKernel _weights_reshape_kernel;
- CLGEMMTranspose1xWKernel _weights_transposed_kernel;
- CLTensor _weights_reshaped;
- bool _transpose1xW;
-};
-
/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
*
- * Note: weights already reshaped for quantized asymmetric is not supported
- *
- * -# @ref CLIm2ColKernel
- * -# @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
- * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8Scale (if quantized asymmetric)
- * -# @ref CLCol2ImKernel
- *
- * if the weights are already reshaped:
- * -# @ref CLGEMMInterleave4x4Kernel
- * -# @ref CLGEMMMatrixMultiplyKernel
- * else
- * -# @ref CLGEMM
+ * -# @ref CLGEMMConvolutionLayer
+ * -# @ref CLDirectConvolutionLayer
*/
class CLConvolutionLayer : public IFunction
{
@@ -108,46 +55,49 @@ public:
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
- * tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+ * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. Data type supported: Same as @p input.
+ */
+ void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayer
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. Data type supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info = WeightsInfo());
+ /** Static function to return the convolution method that @ref CLConvolutionLayer will use for the given info
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. Data type supported: Same as @p input.
+ * @param[in] gpu_target Specifies the @p GPUTarget.
+ *
+ * @return the convolution method to use
*/
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo());
+ static ConvolutionMethod get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info, const GPUTarget gpu_target);
// Inherited methods overridden:
void run() override;
private:
- /** Configures the appropriate matrix multiply routine
- *
- * @param input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
- * @param weights Weights tensor. Data type supported: Same as @p input.
- * @param output Output tensor. Data types supported: Same as @p input,
- * except for input of QASYMM8 type where output should be of S32 type.
- * @param is_interleaved_transposed Flag that signals if matrix is interleaved transposed
- */
- void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed, bool are_weights_reshaped);
-
-private:
- CLMemoryGroup _memory_group;
- CLConvolutionLayerReshapeWeights _reshape_weights;
- CLIm2ColKernel _im2col_kernel;
- CLGEMMInterleave4x4Kernel _interleave_kernel;
- CLGEMMMatrixMultiplyKernel _mm_kernel;
- CLGEMM _mm_gemm;
- CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
- CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
- CLCol2ImKernel _col2im_kernel;
-
- CLTensor _im2col_output;
- CLTensor _interleave_output;
- CLTensor _weights_reshaped;
- CLTensor _weights_transposed;
- CLTensor _gemm_output;
- CLTensor _tmp_output;
-
- bool _are_weights_reshaped;
- bool _is_quantized;
- bool _is_interleaved_transposed;
+ std::shared_ptr<IMemoryManager> _memory_manager;
+ std::unique_ptr<IFunction> _function; /**< Function to run */
};
}
#endif /* __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__ */
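
Taken together, the reworked header turns CLConvolutionLayer into a thin dispatcher over the direct and GEMM-based implementations. A hedged usage sketch of the new API surface (shapes are illustrative, tensor filling and error handling are trimmed; not part of the patch):

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

    using namespace arm_compute;

    void example()
    {
        CLScheduler::get().default_init();

        CLTensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32));
        biases.allocator()->init(TensorInfo(TensorShape(21U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(21U, 25U, 21U), 1, DataType::F32));

        const PadStrideInfo conv_info(1, 1, 0, 0);

        // New in this patch: ask which path would be taken before configuring,
        // e.g. to skip weight pre-reshaping when the direct path is chosen.
        const ConvolutionMethod method = CLConvolutionLayer::get_convolution_method(
            src.info(), weights.info(), biases.info(), dst.info(), conv_info,
            WeightsInfo(), CLScheduler::get().target());
        (void)method;

        // Also new: validate the configuration without building any kernels.
        const Status status = CLConvolutionLayer::validate(
            src.info(), weights.info(), biases.info(), dst.info(), conv_info);
        if(status.error_code() != ErrorCode::OK)
        {
            return;
        }

        CLConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, conv_info);

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();
        conv.run();
    }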
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
new file mode 100644
index 0000000000..7126688f8b
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__
+#define __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
+#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
+#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
+#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLGEMM.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Function to reshape and transpose the weights. This function calls the following kernels:
+ * -# @ref CLWeightsReshapeKernel
+ * -# @ref CLGEMMTranspose1xWKernel
+ */
+class CLConvolutionLayerReshapeWeights : public IFunction
+{
+public:
+ /** Constructor */
+ CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Set the input and output tensors.
+ *
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+ * Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
+ * @param[out] output Destination tensor. Data types supported: Same as @p weights.
+ * @param[in] transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
+ * Data types supported: Same as @p weights.
+ */
+ void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW);
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ CLMemoryGroup _memory_group;
+ CLWeightsReshapeKernel _weights_reshape_kernel;
+ CLGEMMTranspose1xWKernel _weights_transposed_kernel;
+ CLTensor _weights_reshaped;
+ bool _transpose1xW;
+};
+
+/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
+ *
+ * Note: pre-reshaped weights are not supported for quantized asymmetric data types
+ *
+ * -# @ref CLIm2ColKernel
+ * -# @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
+ * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8Scale (if quantized asymmetric)
+ * -# @ref CLCol2ImKernel
+ *
+ * if the weights are already reshaped:
+ * -# @ref CLGEMMInterleave4x4Kernel
+ * -# @ref CLGEMMMatrixMultiplyKernel
+ * else
+ * -# @ref CLGEMM
+ */
+class CLGEMMConvolutionLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Set the input and output tensors.
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
+ * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
+ * tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+ */
+ void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo());
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ /** Configures the appropriate matrix multiply routine
+ *
+ * @param input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param weights Weights tensor. Data type supported: Same as @p input.
+ * @param output Output tensor. Data types supported: Same as @p input,
+ * except for input of QASYMM8 type where output should be of S32 type.
+ * @param is_interleaved_transposed Flag that signals if the input matrix has been interleaved and transposed
+ */
+ void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed, bool are_weights_reshaped);
+
+private:
+ CLMemoryGroup _memory_group;
+ CLConvolutionLayerReshapeWeights _reshape_weights;
+ CLIm2ColKernel _im2col_kernel;
+ CLGEMMInterleave4x4Kernel _interleave_kernel;
+ CLGEMMMatrixMultiplyKernel _mm_kernel;
+ CLGEMM _mm_gemm;
+ CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
+ CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
+ CLCol2ImKernel _col2im_kernel;
+
+ CLTensor _im2col_output;
+ CLTensor _interleave_output;
+ CLTensor _weights_reshaped;
+ CLTensor _weights_transposed;
+ CLTensor _gemm_output;
+ CLTensor _tmp_output;
+
+ bool _are_weights_reshaped;
+ bool _is_quantized;
+ bool _is_interleaved_transposed;
+};
+}
+#endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */
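
Since the moved implementation keeps its original public behaviour, code that previously instantiated CLConvolutionLayer to get the GEMM path can now name CLGEMMConvolutionLayer explicitly, which is exactly what the updated tests below do. A minimal sketch, assuming the same src/weights/biases/dst setup as in the previous sketch:

    #include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

    // Force the GEMM-based path regardless of the dispatcher heuristic.
    arm_compute::CLGEMMConvolutionLayer gemm_conv;
    gemm_conv.configure(&src, &weights, &biases, &dst,
                        arm_compute::PadStrideInfo(1, 1, 0, 0));
    gemm_conv.run(); // weights are reshaped once, on the first run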
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index d1533b6f24..c430174fe7 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -24,10 +24,8 @@
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/core/PixelValue.h"
-#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include <cmath>
@@ -36,315 +34,87 @@
using namespace arm_compute;
-CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
-{
-}
-
-void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW)
-{
- ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
-
- if(biases != nullptr)
- {
- ARM_COMPUTE_ERROR_ON(is_data_type_quantized_asymmetric(weights->info()->data_type()));
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- ARM_COMPUTE_ERROR_ON(biases->info()->dimension(0) != weights->info()->dimension(3));
- ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
- }
-
- const bool append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
- const unsigned bias_element = (append_biases) ? 1 : 0;
- const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;
-
- _transpose1xW = transpose1xW;
-
- if(transpose1xW)
- {
- // Create tensor to store the reshaped weights
- const unsigned int mat_weights_cols = weights->info()->dimension(3);
- const unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
- TensorShape shape_wr(mat_weights_cols, mat_weights_rows);
- const DataType dt = weights->info()->data_type();
- const int fixed_point_position = weights->info()->fixed_point_position();
- TensorInfo info_wr(shape_wr, 1, dt, fixed_point_position);
-
- _weights_reshaped.allocator()->init(info_wr);
- _memory_group.manage(&_weights_reshaped);
- _weights_reshape_kernel.configure(weights, biases_to_use, &_weights_reshaped);
- _weights_transposed_kernel.configure(&_weights_reshaped, output);
- _weights_reshaped.allocator()->allocate();
- }
- else
- {
- _weights_reshape_kernel.configure(weights, biases_to_use, output);
- }
-
- output->info()->set_quantization_info(weights->info()->quantization_info());
-}
-
-void CLConvolutionLayerReshapeWeights::run()
-{
- _memory_group.acquire();
-
- CLScheduler::get().enqueue(_weights_reshape_kernel);
- if(_transpose1xW)
- {
- CLScheduler::get().enqueue(_weights_transposed_kernel);
- }
-
- _memory_group.release();
-}
-
CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _interleave_kernel(), _mm_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
- _col2im_kernel(), _im2col_output(), _interleave_output(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _are_weights_reshaped(false), _is_quantized(false),
- _is_interleaved_transposed(false)
+ : _memory_manager(std::move(memory_manager)), _function()
{
}
-void CLConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed, bool are_weights_reshaped)
+void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
{
- if(_is_quantized)
- {
- if(are_weights_reshaped)
- {
- ARM_COMPUTE_ERROR("Weights already reshaped are not suppported with gemmlowp");
- }
- else
- {
- // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
- // Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info = input->info()->quantization_info();
- const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
-
- input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
- weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
-
- _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+ ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info));
- // Revert back QuantizatioInfo as input and weights could be used in other convolution layers
- input->info()->set_quantization_info(input_quantization_info);
- weights->info()->set_quantization_info(weights_quantization_info);
- }
- }
- else
+ switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info,
+ weights_info, CLScheduler::get().target()))
{
- if(are_weights_reshaped)
+ case ConvolutionMethod::DIRECT:
{
- // Configure matrix multiply kernel
- _mm_kernel.configure(input, weights, output, 1.f, is_interleaved_transposed);
+ auto f = arm_compute::support::cpp14::make_unique<CLDirectConvolutionLayer>();
+ f->configure(input, weights, biases, output, conv_info);
+ _function = std::move(f);
+ break;
}
- else
+ case ConvolutionMethod::GEMM:
{
- // Configure matrix multiply function
- _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+ auto f = arm_compute::support::cpp14::make_unique<CLGEMMConvolutionLayer>(_memory_manager);
+ f->configure(input, weights, biases, output, conv_info, weights_info);
+ _function = std::move(f);
+ break;
}
+ default:
+ ARM_COMPUTE_ERROR("Not supported.");
+ break;
}
}
-void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, weights);
- ARM_COMPUTE_ERROR_ON(weights_info.are_reshaped() && CLScheduler::get().target() == GPUTarget::BIFROST);
- ARM_COMPUTE_ERROR_ON(!weights_info.are_reshaped() && weights->info()->dimension(2) != input->info()->dimension(2));
- ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
- ARM_COMPUTE_ERROR_ON(weights_info.are_reshaped() && is_data_type_quantized_asymmetric(input->info()->data_type()));
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+ // Check which convolution method (direct or GEMM-based) matches the parameters
+ const GPUTarget gpu_target = CLScheduler::get().target();
- if(biases != nullptr)
+ switch(CLConvolutionLayer::get_convolution_method(input, weights, biases, output, conv_info, weights_info, gpu_target))
{
- if(_is_quantized)
+ case ConvolutionMethod::DIRECT:
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
+ // Validate direct convolution layer
+ CLDirectConvolutionLayerKernel::validate(input, weights, biases, output, conv_info, gpu_target);
+ break;
}
- else
+ case ConvolutionMethod::GEMM:
{
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+ // Validate gemm-based convolution layer
+ /* TODO COMPMID-754: Add validation methods for CLGEMMConvolutionLayer
+ CLGEMMConvolutionLayerKernel::validate(input, weights, biases, output, conv_info, weights_info); */
+ break;
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, biases);
- ARM_COMPUTE_ERROR_ON(!weights_info.are_reshaped() && biases->info()->dimension(0) != weights->info()->dimension(3));
- ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
+ default:
+ ARM_COMPUTE_ERROR("Not supported.");
+ break;
}
- const DataType dt = input->info()->data_type();
-
- // Set the GPU target for matrix multiply and im2col and col2im
- _mm_kernel.set_target(CLScheduler::get().target());
- _im2col_kernel.set_target(CLScheduler::get().target());
- _col2im_kernel.set_target(CLScheduler::get().target());
-
- const bool append_bias = (biases != nullptr) && (!_is_quantized);
- _are_weights_reshaped = weights_info.are_reshaped();
-
- const unsigned bias_element = (append_bias) ? 1 : 0;
- const ICLTensor *biases_to_use = (append_bias) ? biases : nullptr;
-
- // Get parameters from conv_info
- unsigned int stride_x = 0;
- unsigned int stride_y = 0;
- std::tie(stride_x, stride_y) = conv_info.stride();
-
- // Get convolved dimensions
- unsigned int conv_w = 0;
- unsigned int conv_h = 0;
-
- const unsigned int kernel_width = (_are_weights_reshaped) ? weights_info.kernel_size().first : weights->info()->dimension(0);
- const unsigned int kernel_height = (_are_weights_reshaped) ? weights_info.kernel_size().second : weights->info()->dimension(1);
- std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
- conv_info);
-
- // Check if its a "fully connected" convolution
- const bool is_fully_connected_convolution = ((conv_w == 1) && (conv_h == 1));
- _is_interleaved_transposed = (!is_fully_connected_convolution) && (!_is_quantized) && (_are_weights_reshaped);
-
- unsigned int mat_weights_cols = weights->info()->dimension(3);
- unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
-
- // Reshape weights if needed
- if(_are_weights_reshaped)
- {
- if(is_fully_connected_convolution || _is_quantized)
- {
- mat_weights_cols = weights->info()->dimension(0);
- mat_weights_rows = weights->info()->dimension(1);
- }
- else
- {
- mat_weights_cols = weights_info.num_kernels();
- const unsigned int quarter_reshaped_cols = weights->info()->dimension(0) / 4;
- mat_weights_rows = quarter_reshaped_cols + bias_element;
- }
- }
- else
- {
- // _weights_reshaped will be auto configured in the kernel.
- // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
- _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, false);
-
- weights = &_weights_reshaped;
- }
-
- // Create tensor to store im2col reshaped inputs
- const unsigned int mat_input_cols = mat_weights_rows;
- const unsigned int mat_input_rows = conv_w * conv_h;
- TensorShape shape_im2col = input->info()->tensor_shape();
- shape_im2col.set(0, mat_input_cols);
- shape_im2col.set(1, mat_input_rows);
- shape_im2col.set(2, 1);
- // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
- TensorInfo im2col_reshaped_info(shape_im2col, 1, dt, input->info()->fixed_point_position());
- im2col_reshaped_info.set_quantization_info(input->info()->quantization_info());
- _im2col_output.allocator()->init(im2col_reshaped_info);
- _memory_group.manage(&_im2col_output);
-
- // Create GEMM output tensor
- TensorShape shape_gemm = _im2col_output.info()->tensor_shape();
- shape_gemm.set(0, mat_weights_cols);
- shape_gemm.set(1, mat_input_rows);
- const DataType gemm_data_type = _is_quantized ? DataType::S32 : dt;
- // GEMM output should be S32 for acquiring raw integer accumulator without quantized postprocessing for quantized asymmetric input.
- // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
- TensorInfo info_gemm(shape_gemm, 1, gemm_data_type, input->info()->fixed_point_position());
- info_gemm.set_quantization_info(output->info()->quantization_info());
- _gemm_output.allocator()->init(info_gemm);
- _memory_group.manage(&_gemm_output);
-
- // Configure im2col
- _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias);
-
- // Configure matrix multiply
- if(_is_interleaved_transposed)
- {
- // Configure GEMMInterleave4x4. _input_interleaved_reshaped will be auto configured in the kernel
- _memory_group.manage(&_interleave_output);
- _interleave_kernel.configure(&_im2col_output, &_interleave_output);
-
- // Configure GEMM
- configure_mm(&_interleave_output, weights, &_gemm_output, true, _are_weights_reshaped);
- _interleave_output.allocator()->allocate();
- }
- else
- {
- configure_mm(&_im2col_output, weights, &_gemm_output, false, _are_weights_reshaped);
- }
- _im2col_output.allocator()->allocate();
-
- // Configure output stage for quantized case
- if(_is_quantized)
- {
- float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
- int output_multiplier, output_shift;
- quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
- _memory_group.manage(&_tmp_output);
- _gemmlowp_output_stage.configure(&_gemm_output, biases, &_tmp_output, output_multiplier, output_shift, output->info()->quantization_info().offset);
- }
-
- // Configure Col2Im
- _col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
- if(_is_quantized)
- {
- _tmp_output.allocator()->allocate();
- }
- _gemm_output.allocator()->allocate();
+ return Status{};
+}
- ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
+ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info, const GPUTarget gpu_target)
+{
+ ARM_COMPUTE_UNUSED(input);
+ ARM_COMPUTE_UNUSED(biases);
+ ARM_COMPUTE_UNUSED(output);
+ ARM_COMPUTE_UNUSED(conv_info);
+ ARM_COMPUTE_UNUSED(weights_info);
- // Allocate intermediate tensor
- if(!_are_weights_reshaped)
+ if((gpu_target == GPUTarget::BIFROST) && (weights->dimension(0) == 5) && (weights->dimension(1) == 5))
{
- _weights_reshaped.allocator()->allocate();
+ return ConvolutionMethod::DIRECT;
}
+ return ConvolutionMethod::GEMM;
}
void CLConvolutionLayer::run()
{
- // Run weights reshaping (Runs once for every configure)
- if(!_are_weights_reshaped)
- {
- _are_weights_reshaped = true;
- _reshape_weights.run();
- }
-
- _memory_group.acquire();
-
- // Run im2col
- CLScheduler::get().enqueue(_im2col_kernel);
-
- // Note: _is_interleaved_transposed is true only if the weights passed to the function have been passed already reshaped
- // and if we do not have QASYMM8 data type. If this flag is true, we need to run the
- // gemm kernel instead of gemm function
- if(_is_interleaved_transposed)
- {
- // Run interleave4x4 kernel
- CLScheduler::get().enqueue(_interleave_kernel);
-
- // Run matrix multiply kernel
- CLScheduler::get().enqueue(_mm_kernel);
- }
- else
- {
- // Runs CLGEMM or CLGEMMLowpMatrixMultiplyCore functions
- if(_is_quantized)
- {
- // Run gemmlowp
- _mm_gemmlowp.run();
-
- // Run output stage
- _gemmlowp_output_stage.run();
- }
- else
- {
- // Run gemm
- _mm_gemm.run();
- }
- }
-
- // Reshape output matrix
- CLScheduler::get().enqueue(_col2im_kernel, false);
-
- _memory_group.release();
+ _function->run();
}
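
The heuristic at the end of this file is intentionally narrow for now: only a 5x5 kernel on a Bifrost target selects the direct path, and everything else falls back to GEMM (WINOGRAD is declared in the enum but never selected yet). A sketch exercising it with bare TensorInfo objects, mirroring the new data test (shapes taken from that test):

    using namespace arm_compute;

    const TensorInfo input(TensorShape(17U, 31U, 2U), 1, DataType::F32);
    const TensorInfo weights(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32);
    const TensorInfo biases(TensorShape(19U), 1, DataType::F32);
    const TensorInfo output(TensorShape(15U, 15U, 19U), 1, DataType::F32);
    const PadStrideInfo conv_info(1, 2, 1, 1);

    // 5x5 kernel on Bifrost -> DIRECT; the same shapes on Midgard -> GEMM.
    const ConvolutionMethod m_bifrost = CLConvolutionLayer::get_convolution_method(
        &input, &weights, &biases, &output, conv_info, WeightsInfo(), GPUTarget::BIFROST);
    const ConvolutionMethod m_midgard = CLConvolutionLayer::get_convolution_method(
        &input, &weights, &biases, &output, conv_info, WeightsInfo(), GPUTarget::MIDGARD);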
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
new file mode 100644
index 0000000000..c4cfe1e24c
--- /dev/null
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
+
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Size2D.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include <cmath>
+#include <memory>
+#include <tuple>
+
+using namespace arm_compute;
+
+CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
+{
+}
+
+void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
+ ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
+
+ if(biases != nullptr)
+ {
+ ARM_COMPUTE_ERROR_ON(is_data_type_quantized_asymmetric(weights->info()->data_type()));
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
+ ARM_COMPUTE_ERROR_ON(biases->info()->dimension(0) != weights->info()->dimension(3));
+ ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
+ }
+
+ const bool append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
+ const unsigned bias_element = (append_biases) ? 1 : 0;
+ const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;
+
+ _transpose1xW = transpose1xW;
+
+ if(transpose1xW)
+ {
+ // Create tensor to store the reshaped weights
+ const unsigned int mat_weights_cols = weights->info()->dimension(3);
+ const unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
+ TensorShape shape_wr(mat_weights_cols, mat_weights_rows);
+ const DataType dt = weights->info()->data_type();
+ const int fixed_point_position = weights->info()->fixed_point_position();
+ TensorInfo info_wr(shape_wr, 1, dt, fixed_point_position);
+
+ _weights_reshaped.allocator()->init(info_wr);
+ _memory_group.manage(&_weights_reshaped);
+ _weights_reshape_kernel.configure(weights, biases_to_use, &_weights_reshaped);
+ _weights_transposed_kernel.configure(&_weights_reshaped, output);
+ _weights_reshaped.allocator()->allocate();
+ }
+ else
+ {
+ _weights_reshape_kernel.configure(weights, biases_to_use, output);
+ }
+
+ output->info()->set_quantization_info(weights->info()->quantization_info());
+}
+
+void CLConvolutionLayerReshapeWeights::run()
+{
+ _memory_group.acquire();
+
+ CLScheduler::get().enqueue(_weights_reshape_kernel);
+ if(_transpose1xW)
+ {
+ CLScheduler::get().enqueue(_weights_transposed_kernel);
+ }
+
+ _memory_group.release();
+}
+
+CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _interleave_kernel(), _mm_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
+ _col2im_kernel(), _im2col_output(), _interleave_output(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _are_weights_reshaped(false), _is_quantized(false),
+ _is_interleaved_transposed(false)
+{
+}
+
+void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed, bool are_weights_reshaped)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
+ if(_is_quantized)
+ {
+ if(are_weights_reshaped)
+ {
+ ARM_COMPUTE_ERROR("Weights already reshaped are not supported with gemmlowp");
+ }
+ else
+ {
+ // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
+ // Extract and negate input and weights offset
+ const QuantizationInfo input_quantization_info = input->info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
+
+ input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
+ weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+
+ _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+
+ // Restore the QuantizationInfo as input and weights could be used in other convolution layers
+ input->info()->set_quantization_info(input_quantization_info);
+ weights->info()->set_quantization_info(weights_quantization_info);
+ }
+ }
+ else
+ {
+ if(are_weights_reshaped)
+ {
+ // Configure matrix multiply kernel
+ _mm_kernel.configure(input, weights, output, 1.f, is_interleaved_transposed);
+ }
+ else
+ {
+ // Configure matrix multiply function
+ _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+ }
+ }
+}
+
+void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, weights);
+ ARM_COMPUTE_ERROR_ON(weights_info.are_reshaped() && CLScheduler::get().target() == GPUTarget::BIFROST);
+ ARM_COMPUTE_ERROR_ON(!weights_info.are_reshaped() && weights->info()->dimension(2) != input->info()->dimension(2));
+ ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
+ ARM_COMPUTE_ERROR_ON(weights_info.are_reshaped() && is_data_type_quantized_asymmetric(input->info()->data_type()));
+
+ _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+
+ if(biases != nullptr)
+ {
+ if(_is_quantized)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+ }
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, biases);
+ ARM_COMPUTE_ERROR_ON(!weights_info.are_reshaped() && biases->info()->dimension(0) != weights->info()->dimension(3));
+ ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
+ }
+
+ const DataType dt = input->info()->data_type();
+
+ // Set the GPU target for matrix multiply and im2col and col2im
+ _mm_kernel.set_target(CLScheduler::get().target());
+ _im2col_kernel.set_target(CLScheduler::get().target());
+ _col2im_kernel.set_target(CLScheduler::get().target());
+
+ const bool append_bias = (biases != nullptr) && (!_is_quantized);
+ _are_weights_reshaped = weights_info.are_reshaped();
+
+ const unsigned bias_element = (append_bias) ? 1 : 0;
+ const ICLTensor *biases_to_use = (append_bias) ? biases : nullptr;
+
+ // Get parameters from conv_info
+ unsigned int stride_x = 0;
+ unsigned int stride_y = 0;
+ std::tie(stride_x, stride_y) = conv_info.stride();
+
+ // Get convolved dimensions
+ unsigned int conv_w = 0;
+ unsigned int conv_h = 0;
+
+ const unsigned int kernel_width = (_are_weights_reshaped) ? weights_info.kernel_size().first : weights->info()->dimension(0);
+ const unsigned int kernel_height = (_are_weights_reshaped) ? weights_info.kernel_size().second : weights->info()->dimension(1);
+ std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
+ conv_info);
+
+ // Check if it's a "fully connected" convolution
+ const bool is_fully_connected_convolution = ((conv_w == 1) && (conv_h == 1));
+ _is_interleaved_transposed = (!is_fully_connected_convolution) && (!_is_quantized) && (_are_weights_reshaped);
+
+ unsigned int mat_weights_cols = weights->info()->dimension(3);
+ unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
+
+ // Reshape weights if needed
+ if(_are_weights_reshaped)
+ {
+ if(is_fully_connected_convolution || _is_quantized)
+ {
+ mat_weights_cols = weights->info()->dimension(0);
+ mat_weights_rows = weights->info()->dimension(1);
+ }
+ else
+ {
+ mat_weights_cols = weights_info.num_kernels();
+ const unsigned int quarter_reshaped_cols = weights->info()->dimension(0) / 4;
+ mat_weights_rows = quarter_reshaped_cols + bias_element;
+ }
+ }
+ else
+ {
+ // _weights_reshaped will be auto configured in the kernel.
+ // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
+ _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, false);
+
+ weights = &_weights_reshaped;
+ }
+
+ // Create tensor to store im2col reshaped inputs
+ const unsigned int mat_input_cols = mat_weights_rows;
+ const unsigned int mat_input_rows = conv_w * conv_h;
+ TensorShape shape_im2col = input->info()->tensor_shape();
+ shape_im2col.set(0, mat_input_cols);
+ shape_im2col.set(1, mat_input_rows);
+ shape_im2col.set(2, 1);
+ // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
+ TensorInfo im2col_reshaped_info(shape_im2col, 1, dt, input->info()->fixed_point_position());
+ im2col_reshaped_info.set_quantization_info(input->info()->quantization_info());
+ _im2col_output.allocator()->init(im2col_reshaped_info);
+ _memory_group.manage(&_im2col_output);
+
+ // Create GEMM output tensor
+ TensorShape shape_gemm = _im2col_output.info()->tensor_shape();
+ shape_gemm.set(0, mat_weights_cols);
+ shape_gemm.set(1, mat_input_rows);
+ const DataType gemm_data_type = _is_quantized ? DataType::S32 : dt;
+ // For quantized asymmetric input the GEMM output should be S32, so the raw integer accumulator is available before the quantized post-processing.
+ // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
+ TensorInfo info_gemm(shape_gemm, 1, gemm_data_type, input->info()->fixed_point_position());
+ info_gemm.set_quantization_info(output->info()->quantization_info());
+ _gemm_output.allocator()->init(info_gemm);
+ _memory_group.manage(&_gemm_output);
+
+ // Configure im2col
+ _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias);
+
+ // Configure matrix multiply
+ if(_is_interleaved_transposed)
+ {
+ // Configure GEMMInterleave4x4. _input_interleaved_reshaped will be auto configured in the kernel
+ _memory_group.manage(&_interleave_output);
+ _interleave_kernel.configure(&_im2col_output, &_interleave_output);
+
+ // Configure GEMM
+ configure_mm(&_interleave_output, weights, &_gemm_output, true, _are_weights_reshaped);
+ _interleave_output.allocator()->allocate();
+ }
+ else
+ {
+ configure_mm(&_im2col_output, weights, &_gemm_output, false, _are_weights_reshaped);
+ }
+ _im2col_output.allocator()->allocate();
+
+ // Configure output stage for quantized case
+ if(_is_quantized)
+ {
+ float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
+ int output_multiplier, output_shift;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+ _memory_group.manage(&_tmp_output);
+ _gemmlowp_output_stage.configure(&_gemm_output, biases, &_tmp_output, output_multiplier, output_shift, output->info()->quantization_info().offset);
+ }
+
+ // Configure Col2Im
+ _col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
+ if(_is_quantized)
+ {
+ _tmp_output.allocator()->allocate();
+ }
+ _gemm_output.allocator()->allocate();
+
+ ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
+
+ // Allocate intermediate tensor
+ if(!_are_weights_reshaped)
+ {
+ _weights_reshaped.allocator()->allocate();
+ }
+}
+
+void CLGEMMConvolutionLayer::run()
+{
+ // Run weights reshaping (Runs once for every configure)
+ if(!_are_weights_reshaped)
+ {
+ _are_weights_reshaped = true;
+ _reshape_weights.run();
+ }
+
+ _memory_group.acquire();
+
+ // Run im2col
+ CLScheduler::get().enqueue(_im2col_kernel);
+
+ // Note: _is_interleaved_transposed is true only if the weights passed to the function were already reshaped
+ // and the data type is not QASYMM8. If this flag is true, we need to run the
+ // GEMM kernel instead of the GEMM function
+ if(_is_interleaved_transposed)
+ {
+ // Run interleave4x4 kernel
+ CLScheduler::get().enqueue(_interleave_kernel);
+
+ // Run matrix multiply kernel
+ CLScheduler::get().enqueue(_mm_kernel);
+ }
+ else
+ {
+ // Runs CLGEMM or CLGEMMLowpMatrixMultiplyCore functions
+ if(_is_quantized)
+ {
+ // Run gemmlowp
+ _mm_gemmlowp.run();
+
+ // Run output stage
+ _gemmlowp_output_stage.run();
+ }
+ else
+ {
+ // Run gemm
+ _mm_gemm.run();
+ }
+ }
+
+ // Reshape output matrix
+ CLScheduler::get().enqueue(_col2im_kernel, false);
+
+ _memory_group.release();
+}
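
Most of the bulk in configure() is shape bookkeeping for the im2col / GEMM / col2im round trip. For the unreshaped-weights case the arithmetic reduces to a few lines; a self-contained sketch of the same computation (hypothetical helper, using FLOOR rounding as scaled_dimensions() does here):

    // Shape bookkeeping for a [W, H, IFM] input convolved with
    // [Kx, Ky, IFM, OFM] weights and an optional appended bias row.
    struct GemmConvShapes
    {
        unsigned int conv_w, conv_h;   // convolved output plane
        unsigned int mat_weights_rows; // Kx * Ky * IFM (+1 if bias is appended)
        unsigned int mat_weights_cols; // OFM
        unsigned int mat_input_rows;   // conv_w * conv_h
    };

    inline GemmConvShapes compute_shapes(unsigned int w, unsigned int h, unsigned int ifm,
                                         unsigned int kx, unsigned int ky, unsigned int ofm,
                                         unsigned int stride_x, unsigned int stride_y,
                                         unsigned int pad_x, unsigned int pad_y, bool append_bias)
    {
        GemmConvShapes s{};
        s.conv_w           = (w + 2 * pad_x - kx) / stride_x + 1;
        s.conv_h           = (h + 2 * pad_y - ky) / stride_y + 1;
        s.mat_weights_rows = kx * ky * ifm + (append_bias ? 1U : 0U);
        s.mat_weights_cols = ofm;
        s.mat_input_rows   = s.conv_w * s.conv_h;
        // im2col output: [mat_weights_rows x mat_input_rows];
        // GEMM output: [OFM x mat_input_rows], folded back to [conv_w, conv_h, OFM] by col2im.
        return s;
    }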
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 46cb097986..b7f9241c88 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
@@ -64,6 +65,57 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)
+DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
+ }),
+ framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
+ })),
+ framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(19U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(19U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(16U), 1, DataType::F32, 0)
+ })),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
+ })),
+ framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
+ PadStrideInfo(1, 2, 1, 1),
+ PadStrideInfo(1, 1, 0, 0),
+ PadStrideInfo(2, 1, 0, 0),
+ PadStrideInfo(3, 2, 1, 0)
+ })),
+ framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
+ GPUTarget::MIDGARD,
+ GPUTarget::G70,
+ GPUTarget::MIDGARD,
+ GPUTarget::BIFROST
+ })),
+
+ framework::dataset::make("Expected", { ConvolutionMethod::DIRECT, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::DIRECT })),
+ input_info, weights_info, biases_info, output_info, conv_info, gpu_target, expected)
+{
+ ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(false),
+ &weights_info.clone()->set_is_resizable(false),
+ &biases_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(false), conv_info, WeightsInfo(), gpu_target);
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(GEMMConvolutionLayer)
+
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()), CNNDataTypes),
input_shape, weights_shape, bias_shape, output_shape, info, data_type)
{
@@ -87,7 +139,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
// Create and configure function
- CLConvolutionLayer conv;
+ CLGEMMConvolutionLayer conv;
conv.configure(&src, &weights, &bias, &dst, info);
// Validate valid region
@@ -110,22 +162,22 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
}
template <typename T>
-using CLConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
+using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::F16)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::F16)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
@@ -133,18 +185,18 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixture<half>, framework::Dat
TEST_SUITE_END()
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -153,25 +205,25 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
+using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
@@ -180,7 +232,7 @@ TEST_SUITE_END()
TEST_SUITE(QS16)
// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType",
DataType::QS16)),
@@ -189,11 +241,11 @@ FIXTURE_DATA_TEST_CASE(RunTiny, CLConvolutionLayerFixedPointFixture<int16_t>, fr
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
@@ -202,11 +254,11 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
+using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
@@ -214,10 +266,10 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerQuantizedFixture<uint8_t>, fr
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 52699b67de..63fba35052 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_TEST_TYPE_PRINTER_H__
#define __ARM_COMPUTE_TEST_TYPE_PRINTER_H__
+#include "arm_compute/core/CL/CLTypes.h"
#include "arm_compute/core/Dimensions.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/HOGInfo.h"
@@ -932,5 +933,70 @@ inline std::string to_string(const HOGInfo &type)
return str.str();
}
+inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &conv_method)
+{
+ switch(conv_method)
+ {
+ case ConvolutionMethod::GEMM:
+ os << "GEMM";
+ break;
+ case ConvolutionMethod::DIRECT:
+ os << "DIRECT";
+ break;
+ case ConvolutionMethod::WINOGRAD:
+ os << "WINOGRAD";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+
+ return os;
+}
+
+inline std::string to_string(const ConvolutionMethod &conv_method)
+{
+ std::stringstream str;
+ str << conv_method;
+ return str.str();
+}
+
+inline ::std::ostream &operator<<(::std::ostream &os, const GPUTarget &gpu_target)
+{
+ switch(gpu_target)
+ {
+ case GPUTarget::GPU_ARCH_MASK:
+ os << "GPU_ARCH_MASK";
+ break;
+ case GPUTarget::MIDGARD:
+ os << "MIDGARD";
+ break;
+ case GPUTarget::BIFROST:
+ os << "BIFROST";
+ break;
+ case GPUTarget::T600:
+ os << "T600";
+ break;
+ case GPUTarget::T700:
+ os << "T700";
+ break;
+ case GPUTarget::T800:
+ os << "T800";
+ break;
+ case GPUTarget::G70:
+ os << "G70";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+
+ return os;
+}
+
+inline std::string to_string(const GPUTarget &gpu_target)
+{
+ std::stringstream str;
+ str << gpu_target;
+ return str.str();
+}
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_TYPE_PRINTER_H__ */
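
Finally, the new printers make the dispatch decision loggable. A one-liner usage sketch (inside some function, stream included via <iostream>), using the operator<< overloads added above:

    std::cout << arm_compute::ConvolutionMethod::DIRECT << " on "
              << arm_compute::GPUTarget::BIFROST << std::endl; // prints "DIRECT on BIFROST"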