author     Georgios Pinitas <georgios.pinitas@arm.com>  2019-05-17 18:14:40 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2019-05-21 11:28:01 +0000
commit     09f24975437e2e141ba51a07055a9372b0d173a2 (patch)
tree       fe565e4b9abd379cb1f467e5d9e36d68fcfbacef /arm_compute/runtime/NEON
parent     f24411ffc842970609a1fb6ba2f9527cfb681dbd (diff)
COMPMID-2109: Remove CL/NE Width/Depth ConcatenateLayer functions.
Change-Id: Icbda771abffbb45d4ed0958933c60ff9ace01314
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1178
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/runtime/NEON')
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                          2
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConcatenateLayer.h        22
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h   93
-rw-r--r--  arm_compute/runtime/NEON/functions/NELSTMLayer.h               12
-rw-r--r--  arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h   90
5 files changed, 22 insertions, 197 deletions
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index d84422f882..0d94ea78fc 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -51,7 +51,6 @@
#include "arm_compute/runtime/NEON/functions/NECopy.h"
#include "arm_compute/runtime/NEON/functions/NECropResize.h"
#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h"
@@ -142,7 +141,6 @@
#include "arm_compute/runtime/NEON/functions/NEUpsampleLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWarpAffine.h"
#include "arm_compute/runtime/NEON/functions/NEWarpPerspective.h"
-#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEYOLOLayer.h"
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
index f8cda326d2..8c97efc4f0 100644
--- a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -26,8 +26,9 @@
#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/core/NEON/kernels/NEHeightConcatenateLayerKernel.h"
+#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/Requires.h"
#include <memory>
#include <vector>
@@ -41,9 +42,9 @@ class Status;
/** Basic function to execute concatenate tensors along a given axis. This function calls the following kernels:
*
- * -# @ref NEWidthConcatenateLayer (if underlying concatenation axis is 0).
+ * -# @ref NEWidthConcatenateLayerKernel (if underlying concatenation axis is 0).
* -# @ref NEHeightConcatenateLayerKernel (if underlying concatenation axis is 1).
- * -# @ref NEDepthConcatenateLayer (if underlying concatenation axis is 2).
+ * -# @ref NEDepthConcatenateLayerKernel (if underlying concatenation axis is 2).
*/
class NEConcatenateLayer : public IFunction
{
@@ -53,17 +54,18 @@ public:
/** Initialise the kernel's inputs vector and output.
*
* @note Input and output tensor dimensions preconditions differ depending on the concatenation axis.
- * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayer.
+ * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayerKernel, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayerKernel.
*
* @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
* @param[out] output Output tensor. Data types supported: Same as @p input.
* @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1 and 2.
*/
- void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, size_t axis);
+ void configure(std::vector<ITensor *> inputs_vector, ITensor *output, size_t axis);
+ void configure(std::vector<const ITensor *> inputs_vector, ITensor *output, size_t axis);
/** Static function to check if given info will lead to a valid configuration of @ref NEConcatenateLayer
*
* @note Input and output tensor dimensions preconditions differ depending on the concatenation axis.
- * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayer.
+ * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayerKernel, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayerKernel.
*
* @param[in] inputs_vector The vectors containing all the tensors info to concatenate. Data types supported: QASYMM8/F16/F32.
* @param[in] output Output tensor info. Data types supported: Same as @p input.
@@ -72,11 +74,19 @@ public:
* @return a status
*/
static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
+ static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
void run() override;
private:
+ template <typename TensorType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorType>::type, ITensor>::value)>
+ void configure_internal(std::vector<TensorType *> &&inputs_vector, ITensor *output, size_t axis);
+
+ template <typename TensorInfoType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorInfoType>::type, ITensorInfo>::value)>
+ static Status validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output, size_t axis);
+
+private:
std::vector<std::unique_ptr<INEKernel>> _concat_kernels;
unsigned int _num_inputs;
unsigned int _axis;
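Usage sketch, not part of this patch: with the width- and depth-specific functions gone, runtime concatenation is driven by NEConcatenateLayer plus an explicit axis argument. The snippet below is a minimal sketch of the updated API; shapes, data type and variable names are illustrative only, and the usual configure-then-allocate flow of arm_compute::Tensor is assumed.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    #include <vector>

    using namespace arm_compute;

    int main()
    {
        // Two F32 inputs of widths 4 and 6 (height 3), concatenated into a width-10 output.
        Tensor in0, in1, out;
        in0.allocator()->init(TensorInfo(TensorShape(4U, 3U), 1, DataType::F32));
        in1.allocator()->init(TensorInfo(TensorShape(6U, 3U), 1, DataType::F32));
        out.allocator()->init(TensorInfo(TensorShape(10U, 3U), 1, DataType::F32));

        // Axis 0 (width) picks NEWidthConcatenateLayerKernel internally;
        // axes 1 and 2 pick the height and depth kernels respectively.
        std::vector<ITensor *> inputs = { &in0, &in1 };
        NEConcatenateLayer concat;
        concat.configure(inputs, &out, 0);

        // Back the tensors with memory after configuration, then run.
        in0.allocator()->allocate();
        in1.allocator()->allocate();
        out.allocator()->allocate();
        concat.run();

        return 0;
    }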
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
deleted file mode 100644
index b3bf752b40..0000000000
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEDEPTHCONCATENATE_H__
-#define __ARM_COMPUTE_NEDEPTHCONCATENATE_H__
-
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h"
-#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
-
-#include <memory>
-#include <vector>
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
- *
- * -# @ref NEFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
- * -# @ref NEDepthConcatenateLayerKernel
- *
- * @deprecated This function is deprecated and will be removed in release 19.08
- *
- */
-class NEDepthConcatenateLayer : public IFunction
-{
-public:
- /** Default constructor */
- NEDepthConcatenateLayer();
- /** Initialise the kernel's inputs vector and output.
- *
- * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
- * Input dimensions might differ for each input for the first three dimensions (width, height, depth)
- * and must match for the rest.
- * Note that the difference between the minimum and maximum width and height among the input tensors
- * must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
- * height when they are less than the maximum input sizes.
- * @param[out] output Output tensor. Data types supported: Same as @p input.
- * Output tensor dimensions match the inputs' ones from the fourth dimension and above,
- * while width and height are the maximum width and height of the input tensors.
- * Finally, depth is the sum of the input depths.
- */
- void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConcatenateLayer
- *
- * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
- * Input dimensions might differ for each input for the first three dimensions (width, height, depth)
- * and must match for the rest.
- * Note that the difference between the minimum and maximum width and height among the input tensors
- * must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
- * height when they are less than the maximum input sizes.
- * @param[in] output Output tensor. Data types supported: Same as @p input.
- * Output tensor dimensions match the inputs' ones from the fourth dimension and above,
- * while width and height are the maximum width and height of the input tensors.
- * Finally, depth is the sum of the input depths.
- *
- * @return a status
- */
- static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
- // Inherited methods overridden:
- void run() override;
-
-private:
- std::vector<ITensor *> _inputs_vector;
- std::vector<std::unique_ptr<NEDepthConcatenateLayerKernel>> _concat_kernels_vector;
- std::vector<std::unique_ptr<NEFillBorderKernel>> _border_handlers_vector;
- unsigned int _num_inputs;
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEDEPTHCONCATENATE_H__ */
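Migration sketch, not part of this patch: callers of the removed class map onto the generic function with axis 2. The helper below is illustrative only (its name and signature are placeholders); it assumes the input and output tensors are already allocated and shaped as the deleted documentation above describes.

    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

    #include <vector>

    // One-shot replacement for a former NEDepthConcatenateLayer use.
    void concatenate_along_depth(const std::vector<arm_compute::ITensor *> &inputs, arm_compute::ITensor *output)
    {
        arm_compute::NEConcatenateLayer concat;
        // Axis 2 selects NEDepthConcatenateLayerKernel, as documented in the
        // NEConcatenateLayer.h hunk above.
        concat.configure(inputs, output, 2); // was: NEDepthConcatenateLayer::configure(inputs, output)
        concat.run();
    }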
diff --git a/arm_compute/runtime/NEON/functions/NELSTMLayer.h b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
index f3a1aa7c75..cf0f06c215 100644
--- a/arm_compute/runtime/NEON/functions/NELSTMLayer.h
+++ b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
@@ -32,9 +32,9 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
-#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
#include "arm_compute/runtime/common/LSTMParams.h"
namespace arm_compute
@@ -176,11 +176,11 @@ private:
NEActivationLayerKernel _projection_clip;
NECopyKernel _copy_cell_state;
NECopyKernel _copy_output;
- NEWidthConcatenateLayer _concat_scratch_buffer;
- NEWidthConcatenateLayer _concat_inputs_forget_gate;
- NEWidthConcatenateLayer _concat_weights_forget_gate;
- NEWidthConcatenateLayer _concat_weights_input_gate;
- NEWidthConcatenateLayer _concat_weights_output;
+ NEConcatenateLayer _concat_scratch_buffer;
+ NEConcatenateLayer _concat_inputs_forget_gate;
+ NEConcatenateLayer _concat_weights_forget_gate;
+ NEConcatenateLayer _concat_weights_input_gate;
+ NEConcatenateLayer _concat_weights_output;
Tensor _input_gate_out1;
Tensor _input_gate_out2;
Tensor _input_gate_out3;
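The only change to NELSTMLayer is the type of its five concatenation members, so each internal configure() call now states its axis explicitly (0, i.e. width, for all of them). The small class below only mirrors that pattern; it is a hedged sketch, not the actual NELSTMLayer internals, and every name in it is a placeholder.

    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

    #include <vector>

    // Mirrors the member-plus-axis pattern NELSTMLayer now uses for its
    // weight/scratch concatenations (placeholder names throughout).
    class GateWeightsConcat
    {
    public:
        void configure(const std::vector<arm_compute::ITensor *> &weights, arm_compute::ITensor *output)
        {
            // Was: NEWidthConcatenateLayer::configure(weights, output);
            // the width axis is now passed explicitly.
            _concat_weights.configure(weights, output, 0);
        }
        void run()
        {
            _concat_weights.run();
        }

    private:
        arm_compute::NEConcatenateLayer _concat_weights;
    };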
diff --git a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
deleted file mode 100644
index 8d221766cd..0000000000
--- a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
-#define __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h"
-
-#include "arm_compute/core/utils/misc/Requires.h"
-
-#include <memory>
-#include <type_traits>
-#include <vector>
-
-namespace arm_compute
-{
-// Forward declarations
-class ITensor;
-
-/** Basic function to execute concatenate tensors along x axis. This function calls the following kernel:
- *
- * -# @ref NEWidthConcatenateLayerKernel
- *
- * @deprecated This function is deprecated and will be removed in release 19.08
- */
-class NEWidthConcatenateLayer : public IFunction
-{
-public:
- /** Default constructor */
- NEWidthConcatenateLayer();
- /** Initialise the kernel's inputs vector and output.
- *
- * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
- * Dimensions of all the inputs should match apart for the width which can differ.
- * @param[out] output Output tensor. Data types supported: Same as @p input.
- * Output tensor dimensions are the same with the inputs from the second dimension and above.
- * The first dimension (width) is the sum of the input tensors' widths.
- */
- void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
- void configure(std::vector<const ITensor *> inputs_vector, ITensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref NEWidthConcatenateLayer
- *
- * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
- * Dimensions of all the inputs should match apart for the width which can differ.
- * @param[in] output Output tensor. Data types supported: Same as @p input.
- * Output tensor dimensions are the same with the inputs from the second dimension and above.
- * The first dimension (width) is the sum of the input tensors' widths.
- *
- * @return a status
- */
- static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
- static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
- // Inherited methods overridden:
- void run() override;
-
-private:
- std::vector<NEWidthConcatenateLayerKernel> _concat_kernels_vector;
- unsigned int _num_inputs;
- template <typename TensorType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorType>::type, ITensor>::value)>
- void configure_internal(std::vector<TensorType *> &&inputs_vector, ITensor *output);
- template <typename TensorInfoType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorInfoType>::type, ITensorInfo>::value)>
- static Status validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output);
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__ */
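The const-correct overloads and the REQUIRES_TA-guarded internal templates deleted here are not lost: the NEConcatenateLayer.h hunk at the top of this diff adds the same machinery there, with an extra axis parameter. Below is a hedged sketch of using the surviving static validate() overload to check a width concatenation up front; it assumes tensors initialised as in the earlier usage sketch, and the helper name is a placeholder.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    #include <vector>

    using namespace arm_compute;

    // Returns true if the given tensors can be concatenated along the width axis.
    bool can_concatenate_width(Tensor &in0, Tensor &in1, Tensor &out)
    {
        std::vector<ITensorInfo *> infos = { in0.info(), in1.info() };
        const Status status = NEConcatenateLayer::validate(infos, out.info(), 0);
        return status.error_code() == ErrorCode::OK;
    }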