about summary refs log tree commit diff
path: root/src/runtime/gpu/cl/operators
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/gpu/cl/operators')
-rw-r--r--  src/runtime/gpu/cl/operators/ClConv2d.cpp                                                     |  6
-rw-r--r--  src/runtime/gpu/cl/operators/ClConv2d.h                                                       |  2
-rw-r--r--  src/runtime/gpu/cl/operators/ClGemmConv2d.cpp (renamed from src/runtime/gpu/cl/operators/ClGemmConvolution.cpp) | 40
-rw-r--r--  src/runtime/gpu/cl/operators/ClGemmConv2d.h (renamed from src/runtime/gpu/cl/operators/ClGemmConvolution.h)     | 20
4 files changed, 34 insertions, 34 deletions
diff --git a/src/runtime/gpu/cl/operators/ClConv2d.cpp b/src/runtime/gpu/cl/operators/ClConv2d.cpp
index 2f4d673d9c..0cb3a968e6 100644
--- a/src/runtime/gpu/cl/operators/ClConv2d.cpp
+++ b/src/runtime/gpu/cl/operators/ClConv2d.cpp
@@ -31,7 +31,7 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
#include "src/runtime/gpu/cl/operators/ClDirectConv2d.h"
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
#include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"
#include <memory>
@@ -104,7 +104,7 @@ void ClConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *s
}
case ConvolutionMethod::GEMM:
{
- auto f = std::make_unique<ClGemmConvolution>();
+ auto f = std::make_unique<ClGemmConv2d>();
f->configure(compile_context, src, weights, biases, dst, conv2d_info, weights_info);
_operator = std::move(f);
break;
@@ -143,7 +143,7 @@ Status ClConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, co
case ConvolutionMethod::GEMM:
{
// Validate gemm-based convolution layer
- ARM_COMPUTE_RETURN_ON_ERROR(ClGemmConvolution::validate(src, weights, biases, dst, conv2d_info, weights_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(ClGemmConv2d::validate(src, weights, biases, dst, conv2d_info, weights_info));
break;
}
default:
diff --git a/src/runtime/gpu/cl/operators/ClConv2d.h b/src/runtime/gpu/cl/operators/ClConv2d.h
index 0888c2f47b..cdf3b7df32 100644
--- a/src/runtime/gpu/cl/operators/ClConv2d.h
+++ b/src/runtime/gpu/cl/operators/ClConv2d.h
@@ -36,7 +36,7 @@ namespace opencl
{
/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
*
- * -# @ref opencl::ClGemmConvolution
+ * -# @ref opencl::ClGemmConv2d
* -# @ref opencl::ClWinogradConv2d
* -# @ref opencl::ClDirectConv2d
* -# @ref CLFFTConvolutionLayer
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp b/src/runtime/gpu/cl/operators/ClGemmConv2d.cpp
index 1926cbbe4d..8c796e0712 100644
--- a/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp
+++ b/src/runtime/gpu/cl/operators/ClGemmConv2d.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/PixelValue.h"
@@ -50,16 +50,16 @@ using namespace misc::shape_calculator;
using namespace utils::cast;
namespace opencl
{
-ClGemmConvolution::ClGemmConvolution()
+ClGemmConv2d::ClGemmConv2d()
: _weights_reshape_kernel(nullptr), _im2col_kernel(nullptr), _mm_gemm(nullptr), _mm_gemmlowp(nullptr), _col2im_kernel(nullptr), _activation_kernel(nullptr), _im2col_output(), _weights_reshaped(),
_gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _append_bias(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
{
}
-ClGemmConvolution::~ClGemmConvolution() = default;
+ClGemmConv2d::~ClGemmConv2d() = default;
-void ClGemmConvolution::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
- const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
- int gemm_3d_depth, const ActivationLayerInfo &act_info)
+void ClGemmConv2d::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+ int gemm_3d_depth, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));
@@ -112,8 +112,8 @@ void ClGemmConvolution::configure_mm(const ClCompileContext &compile_context, co
}
}
-Status ClGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
+Status ClGemmConv2d::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
{
const bool is_quantized = is_data_type_quantized_asymmetric(src->data_type());
@@ -151,14 +151,14 @@ Status ClGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo
}
}
-void ClGemmConvolution::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
- const Conv2dInfo &conv2d_info, const WeightsInfo &weights_info)
+void ClGemmConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const Conv2dInfo &conv2d_info, const WeightsInfo &weights_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
- ARM_COMPUTE_ERROR_THROW_ON(ClGemmConvolution::validate(src, weights, biases, dst,
- conv2d_info,
- weights_info));
+ ARM_COMPUTE_ERROR_THROW_ON(ClGemmConv2d::validate(src, weights, biases, dst,
+ conv2d_info,
+ weights_info));
const DataType data_type = src->data_type();
const DataLayout data_layout = src->data_layout();
@@ -334,8 +334,8 @@ void ClGemmConvolution::configure(const CLCompileContext &compile_context, ITens
_aux_mem[GemmOutput] = MemoryInfo(offset_int_vec(GemmOutput), MemoryLifetime::Temporary, _gemm_output.total_size());
}
-Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
- const WeightsInfo &weights_info)
+Status ClGemmConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
+ const WeightsInfo &weights_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
@@ -371,8 +371,8 @@ Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *we
const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv2d_info.conv_info.stride().first == 1
&& conv2d_info.conv_info.stride().second == 1);
- const bool skip_col2im = data_layout == DataLayout::NHWC;
- bool fuse_activation = true;
+ const bool skip_col2im = data_layout == DataLayout::NHWC;
+ bool fuse_activation = true;
ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * conv2d_info.num_groups) != src->dimension(idx_channel));
ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
@@ -521,7 +521,7 @@ Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *we
return Status{};
}
-void ClGemmConvolution::run(ITensorPack &tensors)
+void ClGemmConv2d::run(ITensorPack &tensors)
{
prepare(tensors);
@@ -593,7 +593,7 @@ void ClGemmConvolution::run(ITensorPack &tensors)
}
}
-void ClGemmConvolution::prepare(ITensorPack &tensors)
+void ClGemmConv2d::prepare(ITensorPack &tensors)
{
if(!_is_prepared)
{
@@ -620,7 +620,7 @@ void ClGemmConvolution::prepare(ITensorPack &tensors)
_is_prepared = true;
}
}
-experimental::MemoryRequirements ClGemmConvolution::workspace() const
+experimental::MemoryRequirements ClGemmConv2d::workspace() const
{
return _aux_mem;
}
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.h b/src/runtime/gpu/cl/operators/ClGemmConv2d.h
index 444516eaaa..e16d029e71 100644
--- a/src/runtime/gpu/cl/operators/ClGemmConvolution.h
+++ b/src/runtime/gpu/cl/operators/ClGemmConv2d.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_GEMMCONVOLUTION_H
-#define ARM_COMPUTE_CL_GEMMCONVOLUTION_H
+#ifndef ARM_COMPUTE_CL_GEMM_CONV2D_H
+#define ARM_COMPUTE_CL_GEMM_CONV2D_H
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
@@ -55,21 +55,21 @@ class ClActivationKernel;
* -# @ref opencl::kernels::ClCol2ImKernel (if NCHW data layout)
* -# @ref opencl::kernels::ClActivationKernel
*/
-class ClGemmConvolution : public IClOperator
+class ClGemmConv2d : public IClOperator
{
public:
/** Constructor */
- ClGemmConvolution();
+ ClGemmConv2d();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- ClGemmConvolution(const ClGemmConvolution &) = delete;
+ ClGemmConv2d(const ClGemmConv2d &) = delete;
/** Default move constructor */
- ClGemmConvolution(ClGemmConvolution &&) = default;
+ ClGemmConv2d(ClGemmConv2d &&) = default;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- ClGemmConvolution &operator=(const ClGemmConvolution &) = delete;
+ ClGemmConv2d &operator=(const ClGemmConv2d &) = delete;
/** Default move assignment operator */
- ClGemmConvolution &operator=(ClGemmConvolution &&) = default;
+ ClGemmConv2d &operator=(ClGemmConv2d &&) = default;
/**Default destructor */
- ~ClGemmConvolution();
+ ~ClGemmConv2d();
/** Set the input and output tensors.
*
* Valid data layouts:
@@ -182,4 +182,4 @@ private:
};
} // namespace opencl
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMMCONVOLUTION_H */
+#endif /* ARM_COMPUTE_CL_GEMM_CONV2D_H */