author     Georgios Pinitas <georgios.pinitas@arm.com>  2021-08-16 12:38:54 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2021-08-20 16:26:16 +0000
commit     19884630c37ae9de2f65a88ea2cda5630a551bad (patch)
tree       0fdffe84c5d66ffe8a1d320b798a247afa1dfdae
parent     73df9310f4e94e43597f283307e3cde0653d8bae (diff)
download   ComputeLibrary-19884630c37ae9de2f65a88ea2cda5630a551bad.tar.gz
Rename [Cl|Cpu]GemmConvolution to [Cl|Cpu]GemmConv2d
Renaming the gemm-based convolution operators to accommodate new operators with higher convolution dimensionality.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Id2f2cf11404221f0e87baa0e5d08ad5d63eaf78e
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6113
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
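A minimal caller-side sketch of what the rename means in practice is given below. It is illustrative only and is not part of the patch: it mirrors the NEGEMMConvolutionLayer and MemoryInjection hunks further down, and the tensor shapes, conv_info and explicitly spelled-out default arguments are assumptions borrowed from that test, not new API.

    // Before this patch the CPU operator was cpu::CpuGemmConvolution; after it,
    // callers construct cpu::CpuGemmConv2d. The OpenCL operator is renamed the
    // same way: opencl::ClGemmConvolution becomes opencl::ClGemmConv2d.
    auto conv = std::make_unique<cpu::CpuGemmConv2d>();

    // Shapes taken from the MemoryInjection test in this patch; the dst shape
    // and conv_info below are illustrative assumptions.
    const auto src_info    = TensorInfo(TensorShape(1U, 5U, 2U), 1, DataType::F32, DataLayout::NCHW);
    const auto weight_info = TensorInfo(TensorShape(1U, 3U, 2U, 3U), 1, DataType::F32, DataLayout::NCHW);
    const auto bias_info   = TensorInfo(TensorShape(3U), 1, DataType::F32, DataLayout::NCHW);
    auto       dst_info    = TensorInfo(TensorShape(1U, 3U, 3U), 1, DataType::F32, DataLayout::NCHW);
    const auto conv_info   = PadStrideInfo(1, 1, 0, 0);

    // validate()/configure() keep the same argument list as before the rename;
    // only the class name changes.
    ARM_COMPUTE_ERROR_THROW_ON(cpu::CpuGemmConv2d::validate(&src_info, &weight_info, &bias_info, &dst_info,
                                                            conv_info, WeightsInfo(), Size2D(1U, 1U),
                                                            ActivationLayerInfo(),
                                                            false /* enable_fast_math */, 1 /* num_groups */));
    conv->configure(&src_info, &weight_info, &bias_info, &dst_info,
                    conv_info, WeightsInfo(), Size2D(1U, 1U),
                    ActivationLayerInfo(),
                    false /* enable_fast_math */, 1 /* num_groups */);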
-rw-r--r--  Android.bp  |  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvolutionLayer.h  |  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h  |  2
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h  |  2
-rw-r--r--  filelist.json  |  4
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp  |  24
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp  |  2
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp  |  22
-rw-r--r--  src/runtime/cpu/operators/CpuConv2d.cpp  |  6
-rw-r--r--  src/runtime/cpu/operators/CpuGemmConv2d.cpp (renamed from src/runtime/cpu/operators/CpuGemmConvolution.cpp)  |  50
-rw-r--r--  src/runtime/cpu/operators/CpuGemmConv2d.h (renamed from src/runtime/cpu/operators/CpuGemmConvolution.h)  |  20
-rw-r--r--  src/runtime/gpu/cl/operators/ClConv2d.cpp  |  6
-rw-r--r--  src/runtime/gpu/cl/operators/ClConv2d.h  |  2
-rw-r--r--  src/runtime/gpu/cl/operators/ClGemmConv2d.cpp (renamed from src/runtime/gpu/cl/operators/ClGemmConvolution.cpp)  |  40
-rw-r--r--  src/runtime/gpu/cl/operators/ClGemmConv2d.h (renamed from src/runtime/gpu/cl/operators/ClGemmConvolution.h)  |  20
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp  |  6
16 files changed, 107 insertions(+), 107 deletions(-)
diff --git a/Android.bp b/Android.bp
index 3a3f1db334..c1adf32893 100644
--- a/Android.bp
+++ b/Android.bp
@@ -647,7 +647,7 @@ cc_library_static {
"src/runtime/cpu/operators/CpuFloor.cpp",
"src/runtime/cpu/operators/CpuFullyConnected.cpp",
"src/runtime/cpu/operators/CpuGemm.cpp",
- "src/runtime/cpu/operators/CpuGemmConvolution.cpp",
+ "src/runtime/cpu/operators/CpuGemmConv2d.cpp",
"src/runtime/cpu/operators/CpuGemmDirectConv2d.cpp",
"src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp",
"src/runtime/cpu/operators/CpuGemmLowpOutputStage.cpp",
@@ -679,7 +679,7 @@ cc_library_static {
"src/runtime/gpu/cl/operators/ClFloor.cpp",
"src/runtime/gpu/cl/operators/ClFullyConnected.cpp",
"src/runtime/gpu/cl/operators/ClGemm.cpp",
- "src/runtime/gpu/cl/operators/ClGemmConvolution.cpp",
+ "src/runtime/gpu/cl/operators/ClGemmConv2d.cpp",
"src/runtime/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.cpp",
"src/runtime/gpu/cl/operators/ClGemmLowpOutputStage.cpp",
"src/runtime/gpu/cl/operators/ClLogicalNot.cpp",
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index 12b3ca1fd2..0f092bdbc2 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -24,9 +24,9 @@
#ifndef ARM_COMPUTE_CLCONVOLUTIONLAYER_H
#define ARM_COMPUTE_CLCONVOLUTIONLAYER_H
-#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/core/CL/CLCompileContext.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
@@ -40,7 +40,7 @@ class ITensorInfo;
/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
*
- * -# @ref opencl::ClGemmConvolution
+ * -# @ref opencl::ClGemmConv2d
* -# @ref opencl::ClWinogradConv2d
* -# @ref opencl::ClDirectConv2d
* -# @ref CLFFTConvolutionLayer
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 3075465ef7..d7a4e7f944 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -41,7 +41,7 @@ class ITensorInfo;
/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
*
- * -# @ref opencl::ClGemmConvolution
+ * -# @ref opencl::ClGemmConv2d
*/
class CLGEMMConvolutionLayer : public IFunction
{
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index fe866dd941..cf5fb82398 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -41,7 +41,7 @@ class ITensorInfo;
/** Basic function to compute the convolution layer. This function calls the following kernels/functions:
*
- * -# @ref cpu::CpuGemmConvolution
+ * -# @ref cpu::CpuGemmConv2d
*
*/
class NEGEMMConvolutionLayer : public IFunction
diff --git a/filelist.json b/filelist.json
index c5abc620cd..d9c67126c6 100644
--- a/filelist.json
+++ b/filelist.json
@@ -288,7 +288,7 @@
"files": {
"operator": [
"src/runtime/gpu/cl/operators/ClGemm.cpp",
- "src/runtime/gpu/cl/operators/ClGemmConvolution.cpp"
+ "src/runtime/gpu/cl/operators/ClGemmConv2d.cpp"
],
"kernel": [
"src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp",
@@ -1240,7 +1240,7 @@
],
"files": {
"operator": [
- "src/runtime/cpu/operators/CpuGemmConvolution.cpp"
+ "src/runtime/cpu/operators/CpuGemmConv2d.cpp"
],
"kernel": [
"src/core/cpu/kernels/CpuWeightsReshapeKernel.cpp"
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 75ca77dbe2..563dbd414f 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -32,7 +32,7 @@
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/helpers/MemoryHelpers.h"
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
#include "support/Cast.h"
#include <cmath>
@@ -47,15 +47,15 @@ using namespace arm_compute::experimental;
struct CLGEMMConvolutionLayer::Impl
{
- const ITensor *weights{ nullptr };
- std::unique_ptr<opencl::ClGemmConvolution> op{ nullptr };
- ITensorPack run_pack{};
- ITensorPack prep_pack{};
- MemoryGroup memory_group{};
- IWeightsManager *weights_manager{ nullptr };
- MemoryRequirements aux_mem_req{};
- WorkspaceData<CLTensor> workspace_tensors{};
- bool is_prepared{ false };
+ const ITensor *weights{ nullptr };
+ std::unique_ptr<opencl::ClGemmConv2d> op{ nullptr };
+ ITensorPack run_pack{};
+ ITensorPack prep_pack{};
+ MemoryGroup memory_group{};
+ IWeightsManager *weights_manager{ nullptr };
+ MemoryRequirements aux_mem_req{};
+ WorkspaceData<CLTensor> workspace_tensors{};
+ bool is_prepared{ false };
};
CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
@@ -79,7 +79,7 @@ void CLGEMMConvolutionLayer::configure(const CLCompileContext &compile_context,
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
_impl->weights = weights;
- _impl->op = std::make_unique<opencl::ClGemmConvolution>();
+ _impl->op = std::make_unique<opencl::ClGemmConv2d>();
const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
_impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv2d_info, weights_info);
@@ -103,7 +103,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
- return opencl::ClGemmConvolution::validate(input, weights, biases, output, conv2d_info, weights_info);
+ return opencl::ClGemmConv2d::validate(input, weights, biases, output, conv2d_info, weights_info);
}
void CLGEMMConvolutionLayer::run()
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index 6e25b429d4..8bd1119a69 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -30,7 +30,7 @@
#include "src/core/helpers/MemoryHelpers.h"
#include "src/runtime/cpu/operators/CpuConv2d.h"
#include "src/runtime/cpu/operators/CpuDirectConv2d.h"
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
#include "src/runtime/cpu/operators/CpuGemmDirectConv2d.h"
#include "src/runtime/cpu/operators/CpuWinogradConv2d.h"
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index c32584ec0d..47ab16816a 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -28,7 +28,7 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/helpers/MemoryHelpers.h"
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
using namespace arm_compute::experimental;
@@ -36,14 +36,14 @@ namespace arm_compute
{
struct NEGEMMConvolutionLayer::Impl
{
- const ITensor *weights{ nullptr };
- std::unique_ptr<cpu::CpuGemmConvolution> op{ nullptr };
- ITensorPack run_pack{};
- MemoryGroup memory_group{};
- IWeightsManager *weights_manager{ nullptr };
- MemoryRequirements aux_mem_req{};
- WorkspaceData<Tensor> workspace_tensors{};
- bool is_prepared{ false };
+ const ITensor *weights{ nullptr };
+ std::unique_ptr<cpu::CpuGemmConv2d> op{ nullptr };
+ ITensorPack run_pack{};
+ MemoryGroup memory_group{};
+ IWeightsManager *weights_manager{ nullptr };
+ MemoryRequirements aux_mem_req{};
+ WorkspaceData<Tensor> workspace_tensors{};
+ bool is_prepared{ false };
};
NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager, IWeightsManager *weights_manager)
@@ -59,7 +59,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
_impl->weights = weights;
- _impl->op = std::make_unique<cpu::CpuGemmConvolution>();
+ _impl->op = std::make_unique<cpu::CpuGemmConv2d>();
_impl->op->configure(input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
_impl->run_pack =
@@ -76,7 +76,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
- return cpu::CpuGemmConvolution::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
+ return cpu::CpuGemmConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
}
void NEGEMMConvolutionLayer::run()
diff --git a/src/runtime/cpu/operators/CpuConv2d.cpp b/src/runtime/cpu/operators/CpuConv2d.cpp
index 809663a918..cff9238308 100644
--- a/src/runtime/cpu/operators/CpuConv2d.cpp
+++ b/src/runtime/cpu/operators/CpuConv2d.cpp
@@ -26,7 +26,7 @@
#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "src/runtime/cpu/operators/CpuDirectConv2d.h"
#include "src/runtime/cpu/operators/CpuGemm.h"
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
#include "src/runtime/cpu/operators/CpuGemmDirectConv2d.h"
#include "src/runtime/cpu/operators/CpuWinogradConv2d.h"
@@ -62,7 +62,7 @@ void CpuConv2d::configure(ITensorInfo *input, ITensorInfo *weights, const ITenso
}
case ConvolutionMethod::GEMM:
{
- auto f = std::make_unique<CpuGemmConvolution>();
+ auto f = std::make_unique<CpuGemmConv2d>();
f->configure(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math);
_function = std::move(f);
break;
@@ -101,7 +101,7 @@ Status CpuConv2d::validate(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_RETURN_ON_ERROR(CpuWinogradConv2d::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math));
break;
case ConvolutionMethod::GEMM:
- ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmConvolution::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math));
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math));
break;
case ConvolutionMethod::GEMM_CONV2D:
ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmDirectConv2d::validate(input, weights, biases, output, info));
diff --git a/src/runtime/cpu/operators/CpuGemmConvolution.cpp b/src/runtime/cpu/operators/CpuGemmConv2d.cpp
index 81d656c905..a81dd8a661 100644
--- a/src/runtime/cpu/operators/CpuGemmConvolution.cpp
+++ b/src/runtime/cpu/operators/CpuGemmConv2d.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/TensorInfo.h"
@@ -51,15 +51,15 @@ namespace arm_compute
{
namespace cpu
{
-CpuGemmConvolution::CpuGemmConvolution()
+CpuGemmConv2d::CpuGemmConv2d()
: _weights_reshape_kernel(nullptr), _im2col_kernel(), _mm_gemm(), _mm_gemmlowp(), _col2im_kernel(), _reshape_kernel(), _im2col_output(), _weights_reshaped(), _gemm_output(), _gemm_output_3d(),
_data_layout(DataLayout::NCHW), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
{
}
-CpuGemmConvolution::~CpuGemmConvolution() = default;
+CpuGemmConv2d::~CpuGemmConv2d() = default;
-void CpuGemmConvolution::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act_info,
- bool enable_fast_math, int gemm_3d_depth)
+void CpuGemmConv2d::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act_info,
+ bool enable_fast_math, int gemm_3d_depth)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, act_info, enable_fast_math, gemm_3d_depth, _skip_im2col));
@@ -137,8 +137,8 @@ void CpuGemmConvolution::configure_mm(const ITensorInfo *src, const ITensorInfo
}
}
-Status CpuGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const ActivationLayerInfo &act_info, bool enable_fast_math, int gemm_3d_depth, bool skip_im2col)
+Status CpuGemmConv2d::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ const ActivationLayerInfo &act_info, bool enable_fast_math, int gemm_3d_depth, bool skip_im2col)
{
const DataType data_type = src->data_type();
const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
@@ -197,7 +197,7 @@ Status CpuGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo
}
}
-Status CpuGemmConvolution::validate_gemm3d(const ITensorInfo *input_info, const ITensorInfo *weights_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
+Status CpuGemmConv2d::validate_gemm3d(const ITensorInfo *input_info, const ITensorInfo *weights_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
{
const DataType data_type = input_info->data_type();
const unsigned int mult_y = skip_im2col ? 1U : gemm_3d_depth;
@@ -211,21 +211,21 @@ Status CpuGemmConvolution::validate_gemm3d(const ITensorInfo *input_info, const
return validate_mm(&dummy_input_info, &dummy_weights_info, nullptr, &dummy_output_info, act_info, false, gemm_3d_depth, skip_im2col);
}
-void CpuGemmConvolution::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
+void CpuGemmConv2d::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+ const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
ARM_COMPUTE_UNUSED(num_groups, weights_info);
- ARM_COMPUTE_ERROR_THROW_ON(CpuGemmConvolution::validate(src,
- weights,
- biases,
- dst,
- conv_info,
- weights_info,
- dilation,
- act_info,
- enable_fast_math,
- num_groups));
+ ARM_COMPUTE_ERROR_THROW_ON(CpuGemmConv2d::validate(src,
+ weights,
+ biases,
+ dst,
+ conv_info,
+ weights_info,
+ dilation,
+ act_info,
+ enable_fast_math,
+ num_groups));
const DataType data_type = src->data_type();
const DataLayout data_layout = src->data_layout();
@@ -353,8 +353,8 @@ void CpuGemmConvolution::configure(const ITensorInfo *src, const ITensorInfo *we
_aux_mem[GemmOutput] = MemoryInfo(offset_int_vec(GemmOutput), MemoryLifetime::Temporary, _gemm_output.total_size());
}
-Status CpuGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
+Status CpuGemmConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
@@ -489,7 +489,7 @@ Status CpuGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *w
return Status{};
}
-void CpuGemmConvolution::run(ITensorPack &tensors)
+void CpuGemmConv2d::run(ITensorPack &tensors)
{
prepare(tensors);
@@ -581,7 +581,7 @@ void CpuGemmConvolution::run(ITensorPack &tensors)
}
}
-void CpuGemmConvolution::prepare(ITensorPack &tensors)
+void CpuGemmConv2d::prepare(ITensorPack &tensors)
{
if(!_is_prepared)
{
@@ -604,7 +604,7 @@ void CpuGemmConvolution::prepare(ITensorPack &tensors)
_is_prepared = true;
}
}
-experimental::MemoryRequirements CpuGemmConvolution::workspace() const
+experimental::MemoryRequirements CpuGemmConv2d::workspace() const
{
return _aux_mem;
}
diff --git a/src/runtime/cpu/operators/CpuGemmConvolution.h b/src/runtime/cpu/operators/CpuGemmConv2d.h
index 7755bbe2a2..529256594f 100644
--- a/src/runtime/cpu/operators/CpuGemmConvolution.h
+++ b/src/runtime/cpu/operators/CpuGemmConv2d.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_GEMMCONVOLUTION_H
-#define ARM_COMPUTE_CPU_GEMMCONVOLUTION_H
+#ifndef ARM_COMPUTE_CPU_GEMM_CONV2D_H
+#define ARM_COMPUTE_CPU_GEMM_CONV2D_H
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
@@ -55,21 +55,21 @@ class CpuReshapeKernel;
* -# @ref kernels::CpuWeightsReshapeKernel
*
*/
-class CpuGemmConvolution : public ICpuOperator
+class CpuGemmConv2d : public ICpuOperator
{
public:
/** Constructor */
- CpuGemmConvolution();
+ CpuGemmConv2d();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CpuGemmConvolution(const CpuGemmConvolution &) = delete;
+ CpuGemmConv2d(const CpuGemmConv2d &) = delete;
/** Prevent instances of this class from being moved (As this class contains non movable objects) */
- CpuGemmConvolution(CpuGemmConvolution &&) = delete;
+ CpuGemmConv2d(CpuGemmConv2d &&) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CpuGemmConvolution &operator=(const CpuGemmConvolution &) = delete;
+ CpuGemmConv2d &operator=(const CpuGemmConv2d &) = delete;
/** Prevent instances of this class from being moved (As this class contains non movable objects) */
- CpuGemmConvolution &operator=(CpuGemmConvolution &&) = delete;
+ CpuGemmConv2d &operator=(CpuGemmConv2d &&) = delete;
/** Destructor */
- ~CpuGemmConvolution();
+ ~CpuGemmConv2d();
/** Set the input and output tensors.
*
* Valid data layouts:
@@ -200,4 +200,4 @@ private:
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_GEMMCONVOLUTION_H */
+#endif /* ARM_COMPUTE_CPU_GEMM_CONV2D_H */
diff --git a/src/runtime/gpu/cl/operators/ClConv2d.cpp b/src/runtime/gpu/cl/operators/ClConv2d.cpp
index 2f4d673d9c..0cb3a968e6 100644
--- a/src/runtime/gpu/cl/operators/ClConv2d.cpp
+++ b/src/runtime/gpu/cl/operators/ClConv2d.cpp
@@ -31,7 +31,7 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
#include "src/runtime/gpu/cl/operators/ClDirectConv2d.h"
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
#include "src/runtime/gpu/cl/operators/ClWinogradConv2d.h"
#include <memory>
@@ -104,7 +104,7 @@ void ClConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *s
}
case ConvolutionMethod::GEMM:
{
- auto f = std::make_unique<ClGemmConvolution>();
+ auto f = std::make_unique<ClGemmConv2d>();
f->configure(compile_context, src, weights, biases, dst, conv2d_info, weights_info);
_operator = std::move(f);
break;
@@ -143,7 +143,7 @@ Status ClConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, co
case ConvolutionMethod::GEMM:
{
// Validate gemm-based convolution layer
- ARM_COMPUTE_RETURN_ON_ERROR(ClGemmConvolution::validate(src, weights, biases, dst, conv2d_info, weights_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(ClGemmConv2d::validate(src, weights, biases, dst, conv2d_info, weights_info));
break;
}
default:
diff --git a/src/runtime/gpu/cl/operators/ClConv2d.h b/src/runtime/gpu/cl/operators/ClConv2d.h
index 0888c2f47b..cdf3b7df32 100644
--- a/src/runtime/gpu/cl/operators/ClConv2d.h
+++ b/src/runtime/gpu/cl/operators/ClConv2d.h
@@ -36,7 +36,7 @@ namespace opencl
{
/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
*
- * -# @ref opencl::ClGemmConvolution
+ * -# @ref opencl::ClGemmConv2d
* -# @ref opencl::ClWinogradConv2d
* -# @ref opencl::ClDirectConv2d
* -# @ref CLFFTConvolutionLayer
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp b/src/runtime/gpu/cl/operators/ClGemmConv2d.cpp
index 1926cbbe4d..8c796e0712 100644
--- a/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp
+++ b/src/runtime/gpu/cl/operators/ClGemmConv2d.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConv2d.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/PixelValue.h"
@@ -50,16 +50,16 @@ using namespace misc::shape_calculator;
using namespace utils::cast;
namespace opencl
{
-ClGemmConvolution::ClGemmConvolution()
+ClGemmConv2d::ClGemmConv2d()
: _weights_reshape_kernel(nullptr), _im2col_kernel(nullptr), _mm_gemm(nullptr), _mm_gemmlowp(nullptr), _col2im_kernel(nullptr), _activation_kernel(nullptr), _im2col_output(), _weights_reshaped(),
_gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _append_bias(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
{
}
-ClGemmConvolution::~ClGemmConvolution() = default;
+ClGemmConv2d::~ClGemmConv2d() = default;
-void ClGemmConvolution::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
- const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
- int gemm_3d_depth, const ActivationLayerInfo &act_info)
+void ClGemmConv2d::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+ int gemm_3d_depth, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));
@@ -112,8 +112,8 @@ void ClGemmConvolution::configure_mm(const ClCompileContext &compile_context, co
}
}
-Status ClGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
+Status ClGemmConv2d::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
{
const bool is_quantized = is_data_type_quantized_asymmetric(src->data_type());
@@ -151,14 +151,14 @@ Status ClGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo
}
}
-void ClGemmConvolution::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
- const Conv2dInfo &conv2d_info, const WeightsInfo &weights_info)
+void ClGemmConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const Conv2dInfo &conv2d_info, const WeightsInfo &weights_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
- ARM_COMPUTE_ERROR_THROW_ON(ClGemmConvolution::validate(src, weights, biases, dst,
- conv2d_info,
- weights_info));
+ ARM_COMPUTE_ERROR_THROW_ON(ClGemmConv2d::validate(src, weights, biases, dst,
+ conv2d_info,
+ weights_info));
const DataType data_type = src->data_type();
const DataLayout data_layout = src->data_layout();
@@ -334,8 +334,8 @@ void ClGemmConvolution::configure(const CLCompileContext &compile_context, ITens
_aux_mem[GemmOutput] = MemoryInfo(offset_int_vec(GemmOutput), MemoryLifetime::Temporary, _gemm_output.total_size());
}
-Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
- const WeightsInfo &weights_info)
+Status ClGemmConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
+ const WeightsInfo &weights_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
@@ -371,8 +371,8 @@ Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *we
const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv2d_info.conv_info.stride().first == 1
&& conv2d_info.conv_info.stride().second == 1);
- const bool skip_col2im = data_layout == DataLayout::NHWC;
- bool fuse_activation = true;
+ const bool skip_col2im = data_layout == DataLayout::NHWC;
+ bool fuse_activation = true;
ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * conv2d_info.num_groups) != src->dimension(idx_channel));
ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
@@ -521,7 +521,7 @@ Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *we
return Status{};
}
-void ClGemmConvolution::run(ITensorPack &tensors)
+void ClGemmConv2d::run(ITensorPack &tensors)
{
prepare(tensors);
@@ -593,7 +593,7 @@ void ClGemmConvolution::run(ITensorPack &tensors)
}
}
-void ClGemmConvolution::prepare(ITensorPack &tensors)
+void ClGemmConv2d::prepare(ITensorPack &tensors)
{
if(!_is_prepared)
{
@@ -620,7 +620,7 @@ void ClGemmConvolution::prepare(ITensorPack &tensors)
_is_prepared = true;
}
}
-experimental::MemoryRequirements ClGemmConvolution::workspace() const
+experimental::MemoryRequirements ClGemmConv2d::workspace() const
{
return _aux_mem;
}
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.h b/src/runtime/gpu/cl/operators/ClGemmConv2d.h
index 444516eaaa..e16d029e71 100644
--- a/src/runtime/gpu/cl/operators/ClGemmConvolution.h
+++ b/src/runtime/gpu/cl/operators/ClGemmConv2d.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_GEMMCONVOLUTION_H
-#define ARM_COMPUTE_CL_GEMMCONVOLUTION_H
+#ifndef ARM_COMPUTE_CL_GEMM_CONV2D_H
+#define ARM_COMPUTE_CL_GEMM_CONV2D_H
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
@@ -55,21 +55,21 @@ class ClActivationKernel;
* -# @ref opencl::kernels::ClCol2ImKernel (if NCHW data layout)
* -# @ref opencl::kernels::ClActivationKernel
*/
-class ClGemmConvolution : public IClOperator
+class ClGemmConv2d : public IClOperator
{
public:
/** Constructor */
- ClGemmConvolution();
+ ClGemmConv2d();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- ClGemmConvolution(const ClGemmConvolution &) = delete;
+ ClGemmConv2d(const ClGemmConv2d &) = delete;
/** Default move constructor */
- ClGemmConvolution(ClGemmConvolution &&) = default;
+ ClGemmConv2d(ClGemmConv2d &&) = default;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- ClGemmConvolution &operator=(const ClGemmConvolution &) = delete;
+ ClGemmConv2d &operator=(const ClGemmConv2d &) = delete;
/** Default move assignment operator */
- ClGemmConvolution &operator=(ClGemmConvolution &&) = default;
+ ClGemmConv2d &operator=(ClGemmConv2d &&) = default;
/**Default destructor */
- ~ClGemmConvolution();
+ ~ClGemmConv2d();
/** Set the input and output tensors.
*
* Valid data layouts:
@@ -182,4 +182,4 @@ private:
};
} // namespace opencl
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMMCONVOLUTION_H */
+#endif /* ARM_COMPUTE_CL_GEMM_CONV2D_H */
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 4332db605d..2178b9b209 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -29,7 +29,7 @@
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/helpers/MemoryHelpers.h"
-#include "src/runtime/cpu/operators/CpuGemmConvolution.h"
+#include "src/runtime/cpu/operators/CpuGemmConv2d.h"
#include "src/runtime/cpu/operators/CpuGemmDirectConv2d.h"
#include "src/runtime/cpu/operators/CpuWinogradConv2d.h"
#include "tests/NEON/Accessor.h"
@@ -510,7 +510,7 @@ using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Acces
template <typename T>
using NEGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;
-/** Test case for memory injection in @ref cpu::CpuGemmConvolution.
+/** Test case for memory injection in @ref cpu::CpuGemmConv2d.
*
* Configure the operator once and inject memory at run-time in multiple executions.
*
@@ -519,7 +519,7 @@ using NEGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixtur
*/
TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
{
- auto conv = std::make_unique<cpu::CpuGemmConvolution>();
+ auto conv = std::make_unique<cpu::CpuGemmConv2d>();
const auto src_info = TensorInfo(TensorShape(1U, 5U, 2U), 1, DataType::F32, DataLayout::NCHW);
const auto weight_info = TensorInfo(TensorShape(1U, 3U, 2U, 3U), 1, DataType::F32, DataLayout::NCHW);
const auto bias_info = TensorInfo(TensorShape(3U), 1, DataType::F32, DataLayout::NCHW);