diff options
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/runtime/CL/CLTypes.h | 10 | ||||
-rw-r--r-- | arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h | 2 |
2 files changed, 1 insertion, 11 deletions
diff --git a/arm_compute/runtime/CL/CLTypes.h b/arm_compute/runtime/CL/CLTypes.h index cf0486c8c3..bba25c6d64 100644 --- a/arm_compute/runtime/CL/CLTypes.h +++ b/arm_compute/runtime/CL/CLTypes.h @@ -30,18 +30,8 @@ namespace arm_compute /** OpenCL GEMM kernel types */ enum class CLGEMMKernelType { - /** Native GEMM kernel with fixed block size. - * @note Temporary variant to keep compatibility with the old implementation. - * @note This variant will be deprecated in favor of a new and configurable NATIVE variant - */ - NATIVE_V1, /** Native GEMM kernel with configurable block size.*/ NATIVE, - /** Reshaped GEMM kernel where both lhs and rhs matrices are reshaped. Fixed block size fixed. - * @note Temporary variant to keep compatibility with the old implementation. - * @note This variant will be deprecated in favor of RESHAPED - */ - RESHAPED_V1, /** Reshaped GEMM kernel where both lhs and rhs matrices are reshaped. Configurable reshape and block size */ RESHAPED, /** Reshaped GEMM kernel where only the rhs matrix is reshaped. Configurable reshape and block size */ diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h index 9235a85d2c..2947b4890c 100644 --- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h +++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h @@ -36,7 +36,7 @@ namespace arm_compute * * -# @ref opencl::kernels::ClIm2ColKernel (called when the input comes from a convolutional layer) * -# @ref CLTranspose (if @p are_weights_reshaped is set to false and transpose_weights is set to true ) (called once) - * -# @ref opencl::kernels::ClGemmMatrixMultiplyKernel or @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric) + * -# @ref opencl::ClGemm or @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric) * * @note The fully connected layer accepts "weights" tensors only with 2 dimensions. */ |