From 6200fa405b16b4145b926a96de197718ad31bf93 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Fri, 6 Jul 2018 17:06:36 +0100
Subject: COMPMID-1288 Optimizing CLGEMMLowp using 8 bit dot product instruction

Change-Id: I536174b9381660a94578d6aa1892a6289a820391
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/139109
Reviewed-by: Georgios Pinitas
Tested-by: Jenkins
---
 arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index d4166b3830..33ac8ecb8a 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -57,7 +57,7 @@ public:
      * @param[in] is_batched_fc_layer True if it is a batched fully connected layer
      */
     void configure(const ITensor *input, ITensor *output, bool transpose_weights, bool is_batched_fc_layer);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayerReshapeWeights
+    /** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayerReshapeWeights
      *
      * @param[in] input  Weights tensor info. The weights must be 2 dimensional. Data types supported: F32.
      * @param[in] output Destination tensor info. Data type supported: Same as @p input.
-- 
cgit v1.2.1
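
Usage sketch (not part of the patch above): the hunk documents the validate() counterpart of
NEFullyConnectedLayerReshapeWeights::configure(). The snippet below illustrates how the two calls
might be used together. The tensor shapes, the trailing bool parameters of validate() (assumed to
mirror configure(), since the hunk is truncated), and the error check are assumptions, not taken
from the patch.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Hypothetical 2D F32 weights tensor and its transposed destination (shapes are illustrative).
        Tensor weights{};
        Tensor reshaped_weights{};
        weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
        reshaped_weights.allocator()->init(TensorInfo(TensorShape(64U, 128U), 1, DataType::F32));

        // Static validation first; the two trailing bools are assumed to match configure().
        const Status status = NEFullyConnectedLayerReshapeWeights::validate(
            weights.info(), reshaped_weights.info(), true /* transpose_weights */, false /* is_batched_fc_layer */);
        ARM_COMPUTE_ERROR_THROW_ON(status);

        // Configure, allocate backing memory, then run the reshape.
        NEFullyConnectedLayerReshapeWeights reshape_weights{};
        reshape_weights.configure(&weights, &reshaped_weights, true, false);
        weights.allocator()->allocate();
        reshaped_weights.allocator()->allocate();
        reshape_weights.run();

        return 0;
    }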