author    Manuel Bottini <manuel.bottini@arm.com>    2019-06-17 12:04:40 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>    2019-07-11 16:14:01 +0000
commit    c1b76faf6be5c33dbf3269faea95e185ac37992f (patch)
tree      c52ecf022bf8b5e54844258744e6a10619b68d83 /arm_compute/runtime
parent    0ec65b8c6438b6d12f17487fdc4c870fe37c7caa (diff)
download  ComputeLibrary-c1b76faf6be5c33dbf3269faea95e185ac37992f.tar.gz
COMPMID-2092: Refactoring interface for the deconvolution kernels (NEON/CL)
3RDPARTY_UPDATE

Change-Id: Id7ddf97e2c9ceb2cb84084fab2c6f5697890c193
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1424
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/runtime')
-rw-r--r--  arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h          | 34
-rw-r--r--  arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h  | 20
-rw-r--r--  arm_compute/runtime/CPP/functions/CPPUpsample.h                   | 10
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h        | 34
4 files changed, 13 insertions, 85 deletions
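
The net effect of the patch is that the deprecated overloads taking inner_border_right/inner_border_top are removed, so stride and padding are described solely by PadStrideInfo. For CLDeconvolutionLayer the prototype change looks roughly as follows; the removed overload is taken verbatim from the hunk below, while the retained one is assumed to mirror it minus the two inner-border parameters (the NEON and CPP functions follow the same pattern):

// Removed (deprecated) overload:
void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output,
               const PadStrideInfo &deconv_info, unsigned int inner_border_right,
               unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());

// Retained overload (assumed):
void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output,
               const PadStrideInfo &deconv_info, const WeightsInfo &weights_info = WeightsInfo());
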
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index e5b406ee5e..b722b466f0 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -46,40 +46,6 @@ public:
/** Set the input, weights, biases and output tensors.
*
- * @note This method will be deprecated in the next release.
- *
- * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
- * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
- * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
- * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
- * @param[in] inner_border_right The number of zeros added to right edge of the input.
- * @param[in] inner_border_top The number of zeros added to top edge of the input.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
- *
- */
- void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
- unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());
- /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
- *
- * @note This method will be deprecated in the next release.
- *
- * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
- * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
- * @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
- * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
- * @param[in] inner_border_right The number of zeros added to right edge of the input.
- * @param[in] inner_border_top The number of zeros added to top edge of the input.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
- unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());
-
- /** Set the input, weights, biases and output tensors.
- *
* @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
* @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
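
A minimal usage sketch of the retained CL interface follows. The shapes, strides and the exact configure() signature are assumptions for illustration (the retained overload is taken to mirror the removed one without the inner-border arguments), not part of the patch:

// Hedged sketch: configuring CLDeconvolutionLayer with PadStrideInfo only.
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative shapes: 8x8x3 input, 4x4 kernels, 16 output feature maps.
    CLTensor input, weights, bias, output;
    input.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(4U, 4U, 3U, 16U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(18U, 18U, 16U), 1, DataType::F32));

    // Stride and padding now come exclusively from PadStrideInfo; there is no
    // separate inner_border_right/inner_border_top argument any more.
    const PadStrideInfo deconv_info(2, 2, 1, 1);

    CLDeconvolutionLayer deconv;
    deconv.configure(&input, &weights, &bias, &output, deconv_info);

    input.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    output.allocator()->allocate();

    deconv.run();
    CLScheduler::get().sync();
    return 0;
}
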
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h
index 3751178703..cab252f0ea 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h
@@ -61,24 +61,20 @@ public:
/** Initialize the function's source, destination, interpolation type and border_mode.
*
- * @param[in, out] input Source tensor. Data type supported: QASYMM8/F16/F32.
- * @param[out] output Destination tensor. Data type supported: same as @p input.
- * @param[in] inner_border The number of zeros added to right and top edges of the input.
- * @param[in] info Contains padding and policies to be used in the deconvolution.
+ * @param[in, out] input Source tensor. Data type supported: QASYMM8/F16/F32.
+ * @param[out] output Destination tensor. Data type supported: same as @p input.
+ * @param[in] info Contains padding and policies to be used in the deconvolution.
*/
- void configure(ICLTensor *input, ICLTensor *output, const BorderSize &inner_border,
- const PadStrideInfo &info);
+ void configure(ICLTensor *input, ICLTensor *output, const PadStrideInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayerUpsample
*
- * @param[in] input Source tensor info. Data type supported: QASYMM8/F16/F32.
- * @param[in] output Destination tensor info. Data type supported: same as @p input.
- * @param[in] inner_border The number of zeros added to right and top edges of the input.
- * @param[in] info Contains padding and policies to be used in the deconvolution.
+ * @param[in] input Source tensor info. Data type supported: QASYMM8/F16/F32.
+ * @param[in] output Destination tensor info. Data type supported: same as @p input.
+ * @param[in] info Contains padding and policies to be used in the deconvolution.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const BorderSize &inner_border,
- const PadStrideInfo &info);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PadStrideInfo &info);
// Inherited methods overridden:
void run() override;
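
The three-argument validate() above can be called up front to check a configuration without touching any OpenCL resources. A small sketch, with illustrative tensor shapes:

// Hedged sketch: validating the trimmed CLDeconvolutionLayerUpsample interface.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"

using namespace arm_compute;

bool upsample_config_is_valid()
{
    const TensorInfo src(TensorShape(8U, 8U, 3U), 1, DataType::F32);
    const TensorInfo dst(TensorShape(15U, 15U, 3U), 1, DataType::F32);
    const PadStrideInfo info(2, 2, 0, 0);

    // The BorderSize inner_border argument is gone; only PadStrideInfo remains.
    const Status status = CLDeconvolutionLayerUpsample::validate(&src, &dst, info);
    return bool(status);
}
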
diff --git a/arm_compute/runtime/CPP/functions/CPPUpsample.h b/arm_compute/runtime/CPP/functions/CPPUpsample.h
index fd7d9c24bf..13f8755762 100644
--- a/arm_compute/runtime/CPP/functions/CPPUpsample.h
+++ b/arm_compute/runtime/CPP/functions/CPPUpsample.h
@@ -38,13 +38,11 @@ class CPPUpsample : public ICPPSimpleFunction
public:
/** Configure the upsample CPP kernel
*
- * @param[in] input The input tensor to upsample. Data types supported: F32/F16/QASYMM8
- * @param[out] output The output tensor. Data types supported: Same as @p input
- * @param[in] info Padding information
- * @param[in] inner_border_right The number of zeros added to right edge of the input.
- * @param[in] inner_border_top The number of zeros added to top edge of the input.
+ * @param[in] input The input tensor to upsample. Data types supported: F32/F16/QASYMM8
+ * @param[out] output The output tensor. Data types supported: Same as @p input
+ * @param[in] info Padding information
*/
- void configure(const ITensor *input, ITensor *output, const PadStrideInfo &info, unsigned int inner_border_right, unsigned int inner_border_top);
+ void configure(const ITensor *input, ITensor *output, const PadStrideInfo &info);
};
}
#endif /* __ARM_COMPUTE_CPPUPSAMPLE_H__ */
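
A corresponding sketch for the trimmed CPPUpsample interface, where padding information now comes only from PadStrideInfo. Shapes below are illustrative assumptions:

// Hedged sketch: CPPUpsample with the reduced three-argument configure().
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPUpsample.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void upsample_example()
{
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(15U, 15U, 3U), 1, DataType::F32));

    CPPUpsample upsample;
    upsample.configure(&src, &dst, PadStrideInfo(2, 2, 0, 0));

    src.allocator()->allocate();
    dst.allocator()->allocate();
    upsample.run();
}
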
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index 4eb684b9aa..62977a7647 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -86,37 +86,6 @@ public:
NEDeconvolutionLayer &operator=(NEDeconvolutionLayer &&) = default;
/** Default destructor */
virtual ~NEDeconvolutionLayer() = default;
- /** Set the input, weights, biases and output tensors.
- *
- * @note This method will be deprecated in the next release.
- *
- * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32/F16/QASYMM8.
- * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Data types supported: S32 for QASYMM8 input, F32 for F32 input, F16 for F16 input.
- * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
- * @param[in] info Contains padding and policies to be used in the deconvolution, this is decribed in @ref PadStrideInfo.
- * @param[in] inner_border_right The number of zeros added to right edge of the input.
- * @param[in] inner_border_top The number of zeros added to top edge of the input.
- *
- */
- void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info,
- unsigned int inner_border_right, unsigned int inner_border_top);
- /** Static function to check if given info will lead to a valid configuration of @ref NEDeconvolutionLayer
- *
- * @note This method will be deprecated in the next release.
- *
- * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32/F16/QASYMM8.
- * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Data types supported: S32 for QASYMM8 input, F32 for F32 input, F16 for F16 input.
- * @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
- * @param[in] info Contains padding and policies to be used in the deconvolution, this is decribed in @ref PadStrideInfo.
- * @param[in] inner_border_right The number of zeros added to right edge of the input.
- * @param[in] inner_border_top The number of zeros added to top edge of the input.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &info,
- unsigned int inner_border_right, unsigned int inner_border_top);
/** Set the input, weights, biases and output tensors.
*
@@ -154,8 +123,7 @@ private:
const ITensor *_original_weights;
ITensor *_input;
PadStrideInfo _info;
- std::pair<unsigned int, unsigned int> _inner_border;
- bool _is_prepared;
+ bool _is_prepared;
};
} // arm_compute
#endif /* __ARM_COMPUTE_NEDECONVOLUTIONLAYER_H__ */
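
Finally, a usage sketch for the NEON function. The surviving configure()/validate() overloads are not shown in this hunk, so their signatures here are an assumption (the removed ones minus the two inner-border parameters), and the shapes and strides are illustrative:

// Hedged sketch: NEDeconvolutionLayer driven purely by PadStrideInfo.
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor input, weights, bias, output;
    input.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(4U, 4U, 3U, 16U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));   // F32 bias for F32 input
    output.allocator()->init(TensorInfo(TensorShape(18U, 18U, 16U), 1, DataType::F32));

    const PadStrideInfo info(2, 2, 1, 1);

    // Optional up-front check, mirroring the CL variant.
    const Status status = NEDeconvolutionLayer::validate(input.info(), weights.info(),
                                                         bias.info(), output.info(), info);
    if(!bool(status))
    {
        return 1;
    }

    NEDeconvolutionLayer deconv;
    deconv.configure(&input, &weights, &bias, &output, info);

    input.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    output.allocator()->allocate();

    deconv.run();
    return 0;
}
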