author     Usama Arif <usama.arif@arm.com>              2019-04-16 14:32:25 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2019-04-17 15:49:59 +0000
commit     2899e00a6fa57242a9bcae1d08a9a7e1e80f14e7 (patch)
tree       78f9ed1fa6fe5cde2acf9a47170e72b96418d730 /arm_compute
parent     dcd949d8c71d879ff656c13d068e8af3dd31eef1 (diff)
download   ComputeLibrary-2899e00a6fa57242a9bcae1d08a9a7e1e80f14e7.tar.gz
COMPMID-2049: Add support for deconvolution for qasymm8 on NEON
Change-Id: I02890c7542f6036edad9cbba9fdcf2312c70070a
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1000
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--   arm_compute/core/CPP/kernels/CPPUpsampleKernel.h             4
-rw-r--r--   arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h   16
2 files changed, 10 insertions, 10 deletions
diff --git a/arm_compute/core/CPP/kernels/CPPUpsampleKernel.h b/arm_compute/core/CPP/kernels/CPPUpsampleKernel.h
index e814c76c7d..4e61356760 100644
--- a/arm_compute/core/CPP/kernels/CPPUpsampleKernel.h
+++ b/arm_compute/core/CPP/kernels/CPPUpsampleKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,7 +55,7 @@ public:
/** Set the input and output of the kernel.
*
- * @param[in] input The input tensor to upsample. Data types supported: F32
+ * @param[in] input The input tensor to upsample. Data types supported: F32/QASYMM8
* @param[out] output The output tensor. Data types supported: Same as @p input
* @param[in] info Padding info.
* @param[in] inner_border_right The number of zeros added to the right edge of the input.
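
For context: the upsample stage of a deconvolution spreads the input out by inserting zero-valued samples between neighbouring elements, after which a regular convolution runs over the enlarged tensor. Below is a minimal standalone sketch of that zero-insertion step for 8-bit data; it illustrates the general technique only, not the library's CPPUpsampleKernel implementation, and the function name, buffer layout and border handling are assumptions.

// Illustrative sketch only (not the library's CPPUpsampleKernel code):
// zero-insertion upsampling as performed before the convolution stage of a
// deconvolution. For QASYMM8 data the padding value would typically be the
// quantization zero point rather than literal 0.
#include <cstdint>
#include <vector>

std::vector<uint8_t> upsample_zero_insert(const std::vector<uint8_t> &src,
                                          int width, int height,
                                          int stride_x, int stride_y,
                                          uint8_t pad_value)
{
    // Output size before any extra border padding: (W - 1) * stride + 1.
    const int out_w = (width - 1) * stride_x + 1;
    const int out_h = (height - 1) * stride_y + 1;
    std::vector<uint8_t> dst(static_cast<size_t>(out_w) * out_h, pad_value);

    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x)
        {
            // Copy each input sample to every stride-th output position;
            // all other positions keep the padding value.
            dst[static_cast<size_t>(y) * stride_y * out_w + static_cast<size_t>(x) * stride_x] =
                src[static_cast<size_t>(y) * width + x];
        }
    }
    return dst;
}
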
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index dad5d81b14..25512fa147 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -90,9 +90,9 @@ public:
*
* @note This method will be deprecated in the next release.
*
- * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
+ * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32/QASYMM8.
* @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
+ * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: S32 for QASYMM8 input, F32 for F32 input.
* @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
* @param[in] info Contains padding and policies to be used in the deconvolution; this is described in @ref PadStrideInfo.
* @param[in] inner_border_right The number of zeros added to the right edge of the input.
@@ -105,9 +105,9 @@ public:
*
* @note This method will be deprecated in the next release.
*
- * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
+ * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32/QASYMM8.
* @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
+ * @param[in] bias (Optional) The biases have one dimension. Data type supported: S32 for QASYMM8 input, F32 for F32 input.
* @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
* @param[in] info Contains padding and policies to be used in the deconvolution; this is described in @ref PadStrideInfo.
* @param[in] inner_border_right The number of zeros added to the right edge of the input.
@@ -120,9 +120,9 @@ public:
/** Set the input, weights, biases and output tensors.
*
- * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
+ * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32/QASYMM8.
* @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
+ * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: S32 for QASYMM8 input, F32 for F32 input.
* @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
* @param[in] info Contains padding and policies to be used in the deconvolution; this is described in @ref PadStrideInfo.
*
@@ -130,9 +130,9 @@ public:
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref NEDeconvolutionLayer
*
- * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
+ * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32/QASYMM8.
* @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
+ * @param[in] bias (Optional) The biases have one dimension. Data type supported: S32 for QASYMM8 input, F32 for F32 input.
* @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
* @param[in] info Contains padding and policies to be used in the deconvolution; this is described in @ref PadStrideInfo.
*
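
With this patch, a quantized deconvolution is set up through the same non-deprecated configure() overload shown above: QASYMM8 input, weights and output with an S32 bias. Below is a minimal usage sketch; the tensor shapes, quantization parameters and strides are illustrative assumptions rather than values taken from the library's tests, and the usual configure-then-allocate order applies.

// Minimal sketch: configuring NEDeconvolutionLayer for QASYMM8 input.
// Shapes and quantization values below are illustrative assumptions.
#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor input, weights, bias, output;

    // QASYMM8 input, weights and output; S32 bias as documented above.
    input.allocator()->init(TensorInfo(TensorShape(4U, 4U, 3U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128)));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.2f, 100)));
    bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::S32));
    // Output spatial size for a 4x4 input, 3x3 kernel, stride 2, no padding:
    // (4 - 1) * 2 + 3 = 9.
    output.allocator()->init(TensorInfo(TensorShape(9U, 9U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128)));

    const PadStrideInfo deconv_info(2, 2, 0, 0); // stride 2x2, no padding

    NEDeconvolutionLayer deconv;
    deconv.configure(&input, &weights, &bias, &output, deconv_info);

    input.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input/weights/bias with quantized data, then:
    deconv.run();
    return 0;
}

The validate() counterpart documented above can be called first with the corresponding tensor info objects to check that such a configuration is supported before any memory is allocated.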