author    Ioan-Cristian Szabo <ioan-cristian.szabo@arm.com>  2017-11-30 17:17:17 +0000
committer Anthony Barbier <anthony.barbier@arm.com>          2018-11-02 16:47:40 +0000
commit    b4e3e1c371d8091e86ee1c6e704057559bbe1554 (patch)
tree      d072c9f9d7471e4df9ef5aa6b50cb09c35b0c361 /arm_compute/runtime/NEON
parent    c1b6e37233e0ebd21cb44bf8863a09c0ba5feeb1 (diff)
download  ComputeLibrary-b4e3e1c371d8091e86ee1c6e704057559bbe1554.tar.gz
COMPMID-617: Add validate support for NEON FullyConnectedLayer
Change-Id: I08987022c8d4cc335c00b8af27bd3edb8fe64d3b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/111596
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Alexander Gilday <alexander.gilday@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute/runtime/NEON')
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h   | 24
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h  | 14
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h      | 10
-rw-r--r--  arm_compute/runtime/NEON/functions/NEIm2Col.h                | 36
4 files changed, 59 insertions, 25 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index 463a7d53e3..9bc8d21fc4 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,6 +57,16 @@ public:
* @param[in] is_batched_fc_layer True if it is a batched fully connected layer
*/
void configure(const ITensor *input, ITensor *output, bool transpose_weights, bool is_batched_fc_layer);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayerReshapeWeights
+ *
+ * @param[in] input Weights tensor info. The weights must be 2 dimensional. Data types supported: QS8/QS16/F32.
+ * @param[in] output Destination tensor info. Data type supported: Same as @p input.
+ * @param[in] transpose_weights True if the weights must be transposed.
+ * @param[in] is_batched_fc_layer True if it is a batched fully connected layer
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, bool transpose_weights, bool is_batched_fc_layer);
// Inherited methods overridden:
void run() override;
@@ -94,6 +104,18 @@ public:
* @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
*/
void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayer
+ *
+ * @param[in] input Source tensor info. Data type supported: QS8/QS16/F16/F32.
+ * @param[in] weights Weights tensor info. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor info. It can be nullptr. Data type supported: Same as @p input.
+ * @param[in] output Destination tensor info. Data type supported: Same as @p input.
+ * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
+ * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false);
//Inherited methods override
void run() override;
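
Note: the new static validate() entry points mirror configure() but operate on ITensorInfo metadata only, so a configuration can be checked before any memory is allocated. A minimal usage sketch; the shapes, names and error handling here are illustrative assumptions, not part of this patch:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
    #include <iostream>

    using namespace arm_compute;

    int main()
    {
        // Describe the tensors by metadata only; no buffers are allocated.
        const TensorInfo src(TensorShape(128U), 1, DataType::F32);
        const TensorInfo weights(TensorShape(128U, 64U), 1, DataType::F32);
        const TensorInfo bias(TensorShape(64U), 1, DataType::F32);
        const TensorInfo dst(TensorShape(64U), 1, DataType::F32);

        const Status status = NEFullyConnectedLayer::validate(&src, &weights, &bias, &dst);
        if(status.error_code() != ErrorCode::OK)
        {
            // Report the failure without ever reaching configure().
            std::cerr << status.error_description() << std::endl;
            return 1;
        }
        return 0;
    }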
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index c3c7f825a9..ac5f4caa78 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -141,12 +141,14 @@ public:
private:
/** Configures the appropriate matrix multiply routine
*
- * @param[in] input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
- * @param[in] weights Weights tensor. Data type supported: Same as @p input.
- * @param[out] output Output tensor. Data types supported: Same as @p input,
- * except for input of QASYMM8 type where output should be of S32 type.
+ * @param[in] input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. Data types supported: Same as @p input,
+ * except for input of QASYMM8 type where output should be of S32 type.
+ * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+ * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved = true, this object must contain the information to understand how matrix A and matrix B have been reshaped
*/
- void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output);
+ void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, bool is_interleaved, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
/** Prepare the appropriate assembly optimized kernel
*
* @param[in] ci CPU information
@@ -178,7 +180,7 @@ private:
bool _is_fully_connected_convolution;
bool _are_weights_reshaped;
bool _is_quantized;
- bool _is_interleaved_transposed;
+ bool _is_interleaved;
};
}
#endif /* __ARM_COMPUTE_NECONVOLUTIONGEMMLAYER_H__ */
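
Note: the renamed is_interleaved flag records that the GEMM operands were reshaped up front, matrix A with a 4x4 interleave and matrix B with a 1xW transpose. A hedged sketch of that reshaping using the public NEON wrappers; tensor initialisation is elided and the names are illustrative:

    #include "arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Reshape both GEMM operands; the matrix multiply can then be
    // configured with is_interleaved = true.
    void reshape_gemm_inputs(Tensor &a, Tensor &a_interleaved, Tensor &b, Tensor &b_transposed)
    {
        NEGEMMInterleave4x4 interleave;
        NEGEMMTranspose1xW  transpose;
        interleave.configure(&a, &a_interleaved); // A interleaved 4 rows at a time
        transpose.configure(&b, &b_transposed);   // B transposed in 1xW blocks
        interleave.run();
        transpose.run();
    }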
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h b/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
index 447b8c9c70..8b9ad136b4 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,6 +42,14 @@ public:
* @param[out] output Output tensor. Data type supported: same as @p input
*/
void configure(const ITensor *input, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMTranspose1xW
+ *
+ * @param[in] input Input tensor info. Data type supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32
+ * @param[in] output Output tensor info. Data type supported: same as @p input
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
};
}
#endif /*__ARM_COMPUTE_NEGEMMTRANSPOSE1XW_H__ */
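
Note: since every function now exposes validate() with the same Status return type, the check composes naturally. A hypothetical convenience helper a caller might write on top; it is not provided by the library and the name is an assumption of this note:

    #include "arm_compute/core/Error.h"
    #include <stdexcept>
    #include <utility>

    // Hypothetical helper: turn a failed validate() into an exception so a
    // bad configuration is rejected before configure() is ever reached.
    template <typename Function, typename... Args>
    void validate_or_throw(Args &&... args)
    {
        const arm_compute::Status status = Function::validate(std::forward<Args>(args)...);
        if(status.error_code() != arm_compute::ErrorCode::OK)
        {
            throw std::runtime_error(status.error_description());
        }
    }

    // Usage, e.g.: validate_or_throw<NEGEMMTranspose1xW>(&input_info, &output_info);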
diff --git a/arm_compute/runtime/NEON/functions/NEIm2Col.h b/arm_compute/runtime/NEON/functions/NEIm2Col.h
index cb08f5cd09..cf4999b5af 100644
--- a/arm_compute/runtime/NEON/functions/NEIm2Col.h
+++ b/arm_compute/runtime/NEON/functions/NEIm2Col.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,28 +39,30 @@ class NEIm2Col : public INESimpleFunction
public:
/** Configure the im2col NEON kernel
*
- * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
- * Note: QASYMM8 works only for has_bias = false
- * @param[out] output The output tensor. Data types supported: Same as @p input
- * @param[in] kernel_dims The kernel dimensions (width and height).
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in] has_bias In case biases are provided expands the matrix with 1.
+ * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * Note: QASYMM8 works only for has_bias = false
+ * @param[out] output The output tensor. Data types supported: Same as @p input
+ * @param[in] kernel_dims The kernel dimensions (width and height).
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] has_bias In case biases are provided, expands the matrix with an extra column of ones.
+ * @param[in] is_fully_connected Determines whether this function is called by @ref NEFullyConnectedLayer, so that the arguments can be validated accordingly
*/
- void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias);
+ void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, bool is_fully_connected = false);
/** Static function to check if given info will lead to a valid configuration of @ref NEIm2Col
*
- * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
- * Note: QASYMM8 works only for has_bias = false
- * @param[in] output The output tensor. Data types supported: Same as @p input
- * @param[in] kernel_dims The kernel dimensions (width and height).
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in] has_bias In case biases are provided expands the matrix with 1.
+ * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * Note: QASYMM8 works only for has_bias = false
+ * @param[in] output The output tensor. Data types supported: Same as @p input
+ * @param[in] kernel_dims The kernel dimensions (width and height).
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] has_bias In case biases are provided, expands the matrix with an extra column of ones.
+ * @param[in] is_fully_connected Determines whether this function is called by @ref NEFullyConnectedLayer, so that the arguments can be validated accordingly
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, bool is_fully_connected);
};
}
#endif /* __ARM_COMPUTE_NEIM2COL_H__ */
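
Note: the is_fully_connected flag exists so that NEFullyConnectedLayer::validate can reuse NEIm2Col::validate with fully connected semantics. A sketch of what such a call site could look like; the wrapper function, the 1x1 kernel and the unit-stride PadStrideInfo are illustrative assumptions, not code from this patch:

    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/NEON/functions/NEIm2Col.h"

    using namespace arm_compute;

    // Hypothetical fragment of a fully connected layer's validate() path.
    Status validate_fc_im2col(const ITensorInfo *input, const ITensorInfo *im2col_output)
    {
        // Degenerate convolution parameters: 1x1 kernel, unit stride, no padding.
        ARM_COMPUTE_RETURN_ON_ERROR(NEIm2Col::validate(input, im2col_output,
                                                       Size2D(1U, 1U),
                                                       PadStrideInfo(1, 1, 0, 0),
                                                       false /* has_bias */,
                                                       true  /* is_fully_connected */));
        return Status{};
    }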