path: root/arm_compute
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/NEKernels.h                               |  1
-rw-r--r--  arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h   | 11
-rw-r--r--  arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h   | 84
-rw-r--r--  arm_compute/runtime/CL/functions/CLConcatenateLayer.h           |  2
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h      |  2
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                          |  2
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConcatenateLayer.h         | 81
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h    | 30
-rw-r--r--  arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h    | 79
9 files changed, 286 insertions, 6 deletions
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index b9c54b2c25..156b116ce0 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -112,6 +112,7 @@
#include "arm_compute/core/NEON/kernels/NETransposeKernel.h"
#include "arm_compute/core/NEON/kernels/NEWarpKernel.h"
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#endif /* __ARM_COMPUTE_NEKERNELS_H__ */
diff --git a/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h b/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h
index 12a5051ef8..848d89fc9f 100644
--- a/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h
@@ -55,7 +55,7 @@ public:
~NEDepthConcatenateLayerKernel() = default;
/** Initialise the kernel's inputs and output
*
- * @param[in] input Input tensor. Data types supported: F16/F32.
+ * @param[in] input Input tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] depth_offset The offset on the Z axis.
* @param[in,out] output Output tensor. Data types supported: Same as @p input.
*
@@ -64,6 +64,15 @@ public:
*
*/
void configure(const ITensor *input, unsigned int depth_offset, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConcatenateLayerKernel
+ *
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] depth_offset The offset on the Z axis.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, unsigned int depth_offset, const ITensorInfo *output);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
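The new kernel-level validate() lets a configuration be checked without allocating any backing memory. A minimal sketch of the intended call pattern, not part of this patch (the shapes, the QASYMM8 choice and the Status handling are illustrative assumptions based on the existing arm_compute API):

    // Check that a 16x16x3 QASYMM8 input fits into a 16x16x8 output at depth offset 0.
    arm_compute::TensorInfo input_info(arm_compute::TensorShape(16U, 16U, 3U), 1, arm_compute::DataType::QASYMM8);
    arm_compute::TensorInfo output_info(arm_compute::TensorShape(16U, 16U, 8U), 1, arm_compute::DataType::QASYMM8);
    const arm_compute::Status status = arm_compute::NEDepthConcatenateLayerKernel::validate(&input_info, 0U /* depth_offset */, &output_info);
    if(!bool(status))
    {
        // status.error_description() explains why the configuration was rejected.
    }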
diff --git a/arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h b/arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h
new file mode 100644
index 0000000000..4cf32736e9
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __ARM_COMPUTE_NEWIDTHCONCATENATELAYERKERNEL_H__
+#define __ARM_COMPUTE_NEWIDTHCONCATENATELAYERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Interface for the width concatenate kernel.
+ * The input tensor will be concatenated into the output tensor.
+ */
+class NEWidthConcatenateLayerKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEWidthConcatenateLayerKernel";
+ }
+ /** Default constructor */
+ NEWidthConcatenateLayerKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWidthConcatenateLayerKernel(const NEWidthConcatenateLayerKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEWidthConcatenateLayerKernel &operator=(const NEWidthConcatenateLayerKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEWidthConcatenateLayerKernel(NEWidthConcatenateLayerKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEWidthConcatenateLayerKernel &operator=(NEWidthConcatenateLayerKernel &&) = default;
+ /** Default destructor */
+ ~NEWidthConcatenateLayerKernel() = default;
+ /** Initialise the kernel's inputs and output
+ *
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] width_offset The offset on the X axis.
+ * @param[in,out] output Output tensor. Data types supported: Same as @p input.
+ *
+ */
+ void configure(const ITensor *input, unsigned int width_offset, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEWidthConcatenateLayerKernel
+ *
+ * @param[in] input Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] width_offset The offset on the X axis.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, unsigned int width_offset, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ const ITensor *_input;
+ ITensor *_output;
+ unsigned int _width_offset;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEWIDTHCONCATENATELAYERKERNEL_H__ */
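For completeness, a hedged sketch of driving the new kernel directly; in practice the NEWidthConcatenateLayer function added further down owns and schedules it. Tensor names and the offset are illustrative, not part of the patch:

    // Copy `input` into `output` starting at x == 12, after checking the configuration.
    arm_compute::Tensor input;  // assumed initialised and allocated elsewhere
    arm_compute::Tensor output; // assumed initialised and allocated elsewhere
    arm_compute::NEWidthConcatenateLayerKernel kernel;
    if(bool(arm_compute::NEWidthConcatenateLayerKernel::validate(input.info(), 12U, output.info())))
    {
        kernel.configure(&input, 12U, &output);
        // The owning function then schedules the configured kernel through NEScheduler.
    }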
diff --git a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
index 018c58942f..4d4c62434a 100644
--- a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
@@ -57,7 +57,7 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
* @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0 and 2.
*/
- void configure(std::vector<ICLTensor *> inputs_vector, ICLTensor *output, DataLayoutDimension axis);
+ void configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, DataLayoutDimension axis);
/** Static function to check if given info will lead to a valid configuration of @ref CLConcatenateLayer
*
* @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
diff --git a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
index bafce1c66f..aef5d63654 100644
--- a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
@@ -63,7 +63,7 @@ public:
* while width and height are the maximum width and height of the input tensors.
* Finally, depth is the sum of the input depths.
*/
- void configure(std::vector<ICLTensor *> inputs_vector, ICLTensor *output);
+ void configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthConcatenateLayer
*
* @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 988eb3b791..6a3fabca67 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -41,6 +41,7 @@
#include "arm_compute/runtime/NEON/functions/NEChannelExtract.h"
#include "arm_compute/runtime/NEON/functions/NECol2Im.h"
#include "arm_compute/runtime/NEON/functions/NEColorConvert.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
#include "arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h"
#include "arm_compute/runtime/NEON/functions/NEConvolution.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
@@ -112,6 +113,7 @@
#include "arm_compute/runtime/NEON/functions/NETranspose.h"
#include "arm_compute/runtime/NEON/functions/NEWarpAffine.h"
#include "arm_compute/runtime/NEON/functions/NEWarpPerspective.h"
+#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#endif /* __ARM_COMPUTE_NEFUNCTIONS_H__ */
\ No newline at end of file
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
new file mode 100644
index 0000000000..2cdc720fb6
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NECONCATENATELAYER_H__
+#define __ARM_COMPUTE_NECONCATENATELAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/Types.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+class ITensorInfo;
+class Status;
+
+/** Basic function to concatenate tensors along a given axis. This function calls the following kernels:
+ *
+ * -# @ref NEWidthConcatenateLayer (if underlying concatenation axis is 0).
+ * -# @ref NEDepthConcatenateLayer (if underlying concatenation axis is 2).
+ */
+class NEConcatenateLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ NEConcatenateLayer();
+ /** Initialise the kernel's inputs vector and output.
+ *
+ * @note Input and output tensor dimensions preconditions differ depending on the concatenation axis.
+ * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer and @ref NEDepthConcatenateLayer.
+ *
+ * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
+ * @param[out] output Output tensor. Data types supported: Same as @p input.
+ * @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0 and 2.
+ */
+ void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEConcatenateLayer
+ *
+ * @note Input and output tensor dimensions preconditions differ depending on the concatenation axis.
+ * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer and @ref NEDepthConcatenateLayer.
+ *
+ * @param[in] inputs_vector The vectors containing all the tensors info to concatenate. Data types supported: QASYMM8/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input.
+ * @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0 and 2.
+ *
+ * @return a status
+ */
+ static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ std::unique_ptr<IFunction> _concat_function;
+};
+}
+#endif /* __ARM_COMPUTE_NECONCATENATELAYER_H__ */
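A hedged usage sketch for the new wrapper: DataLayoutDimension::WIDTH selects the underlying axis 0 path (NEWidthConcatenateLayer) and DataLayoutDimension::CHANNEL the axis 2 path (NEDepthConcatenateLayer). Tensor names and shapes are illustrative, not part of the patch:

    arm_compute::Tensor in0, in1, out; // assumed initialised elsewhere with compatible shapes and data types
    std::vector<arm_compute::ITensor *> inputs_vector = { &in0, &in1 };

    // Validate on the tensor infos first, then configure and run the wrapper.
    const arm_compute::Status status = arm_compute::NEConcatenateLayer::validate({ in0.info(), in1.info() }, out.info(), arm_compute::DataLayoutDimension::WIDTH);
    if(bool(status))
    {
        arm_compute::NEConcatenateLayer concat;
        concat.configure(inputs_vector, &out, arm_compute::DataLayoutDimension::WIDTH); // dispatches to NEWidthConcatenateLayer
        concat.run();
    }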
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index eefb5fa362..e2162ef042 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -49,10 +49,34 @@ public:
NEDepthConcatenateLayer();
/** Initialise the kernel's inputs vector and output.
*
- * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: F16/F32.
- * @param[out] output Output tensor. Data types supported: Same as @p inputs_vector.
+ * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
+ * Input dimensions might differ for each input for the first three dimensions (width, height, depth)
+ * and must match for the rest.
+ * Note that the difference between the minimum and maximum width and height among the input tensors
+ * must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
+ * height when they are less than the maximum input sizes.
+ * @param[out] output Output tensor. Data types supported: Same as @p input.
+ * Output tensor dimensions match the inputs' ones from the fourth dimension and above,
+ * while width and height are the maximum width and height of the input tensors.
+ * Finally, depth is the sum of the input depths.
*/
- void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
+ void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConcatenateLayer
+ *
+ * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
+ * Input dimensions might differ for each input for the first three dimensions (width, height, depth)
+ * and must match for the rest.
+ * Note that the difference between the minimum and maximum width and height among the input tensors
+ * must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
+ * height when they are less than the maximum input sizes.
+ * @param[in] output Output tensor. Data types supported: Same as @p input.
+ * Output tensor dimensions match the inputs' ones from the fourth dimension and above,
+ * while width and height are the maximum width and height of the input tensors.
+ * Finally, depth is the sum of the input depths.
+ *
+ * @return a status
+ */
+ static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
// Inherited methods overridden:
void run() override;
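The matching function-level validate() takes the whole list of tensor infos; a short hedged sketch under the same QASYMM8 assumption (shapes are illustrative only):

    // Two feature maps concatenated along depth: 16x16x3 + 16x16x5 -> 16x16x8.
    arm_compute::TensorInfo a(arm_compute::TensorShape(16U, 16U, 3U), 1, arm_compute::DataType::QASYMM8);
    arm_compute::TensorInfo b(arm_compute::TensorShape(16U, 16U, 5U), 1, arm_compute::DataType::QASYMM8);
    arm_compute::TensorInfo dst(arm_compute::TensorShape(16U, 16U, 8U), 1, arm_compute::DataType::QASYMM8);
    const arm_compute::Status status = arm_compute::NEDepthConcatenateLayer::validate({ &a, &b }, &dst);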
diff --git a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
new file mode 100644
index 0000000000..e68525fa76
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
+#define __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+
+/** Basic function to concatenate tensors along the x axis. This function calls the following kernel:
+ *
+ * -# @ref NEWidthConcatenateLayerKernel
+ */
+class NEWidthConcatenateLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ NEWidthConcatenateLayer();
+ /** Initialise the kernel's inputs vector and output.
+ *
+ * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * Dimensions of all the inputs should match apart for the width which can differ.
+ * @param[out] output Output tensor. Data types supported: Same as @p input.
+ * Output tensor dimensions are the same with the inputs from the second dimension and above.
+ * The first dimension (width) is the sum of the input tensors' widths.
+ */
+ void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEWidthConcatenateLayer
+ *
+ * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * Dimensions of all the inputs should match apart for the width which can differ.
+ * @param[in] output Output tensor. Data types supported: Same as @p input.
+ * Output tensor dimensions are the same with the inputs from the second dimension and above.
+ * The first dimension (width) is the sum of the input tensors' widths.
+ *
+ * @return a status
+ */
+ static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ std::unique_ptr<NEWidthConcatenateLayerKernel[]> _concat_kernels_vector;
+ unsigned int _num_inputs;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__ */
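Finally, a hedged end-to-end sketch of the standalone width-concatenation function; the shapes, the F32 choice and the allocator calls are assumptions taken from the existing runtime API rather than part of this patch:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Two F32 inputs that differ only in width: 8x4 and 12x4 concatenated into a 20x4 output.
        Tensor in0, in1, out;
        in0.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
        in1.allocator()->init(TensorInfo(TensorShape(12U, 4U), 1, DataType::F32));
        out.allocator()->init(TensorInfo(TensorShape(20U, 4U), 1, DataType::F32));

        NEWidthConcatenateLayer concat;
        concat.configure({ &in0, &in1 }, &out); // configure before allocating, as usual for NEON functions

        in0.allocator()->allocate();
        in1.allocator()->allocate();
        out.allocator()->allocate();
        // ... fill in0 and in1 ...
        concat.run();
        return 0;
    }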