diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2018-11-16 17:11:50 +0000 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2018-11-19 17:42:58 +0000 |
commit | 8cffcd6b6e4e95f97767f2a25ccc8826dd69c358 (patch) | |
tree | 339d4053464ef995d24da035595b44155810036d /arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h | |
parent | d5c075c4ecdac35cd07538acc559a2d8805d8c1c (diff) | |
download | ComputeLibrary-8cffcd6b6e4e95f97767f2a25ccc8826dd69c358.tar.gz |
COMPMID-1644: NEDepthwiseConvolution for FP16 NHWC
Change-Id: I6e7dee8bd615a5eff01c523f208a218574ee5eab
Diffstat (limited to 'arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h')
-rw-r--r-- | arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h | 8 |
1 file changed, 5 insertions, 3 deletions
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h
index b78684f993..dcf52442a9 100644
--- a/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h
@@ -53,7 +53,8 @@ public:
     NEDepthwiseWeightsReshapeKernel &operator=(NEDepthwiseWeightsReshapeKernel &&) = default;
     /** Set the input and output of the kernel.
      *
-     * @param[in]  input   The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: QASYMM8, F32.
+     * @param[in]  input   The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM].
+     *                     Data type supported: QASYMM8/F16/F32.
      * @param[out] output  The output tensor. Data type supported: same as @p input.
      * @param[in]  biases  (Optional) The input biases to add. Shape [IFM]. Data type supported: same as @p input.
      */
@@ -61,7 +62,8 @@ public:
     /** Static function to check if given info will lead to a valid configuration of @ref NEDepthwiseWeightsReshapeKernel
      *
-     * @param[in]  input   The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: QASYMM8, F32.
+     * @param[in]  input   The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM].
+     *                     Data type supported: QASYMM8/F16/F32.
      * @param[in]  output  The output tensor. Data type supported: same as @p input.
      * @param[in]  biases  (Optional) The input biases to add. Shape [IFM]. Data type supported: same as @p input.
      *
@@ -81,5 +83,5 @@ private:
     ITensor       *_output;
     const ITensor *_biases;
 };
-} // arm_compute
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEDEPTHWISEWEIGHTSRESHAPEKERNEL_H__ */