author    John Kesapides <john.kesapides@arm.com>    2019-02-04 12:37:29 +0000
committer Pablo Marquez <pablo.tello@arm.com>    2019-03-14 09:29:15 +0000
commit    917959c88361e8148696c156453f69c6ae0c95c0 (patch)
tree      fbdadd65904b446edcc275f9e85874d2dfb7f8d3 /src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
parent    894066de8cc26d1a3aca62dcaa6b30a2a1116028 (diff)
download  ComputeLibrary-917959c88361e8148696c156453f69c6ae0c95c0.tar.gz
COMPMID-1281 Investigate concatenation for RNN/LSTM NEON
Change-Id: I7f099348a361a6f2d4efb30618f58bd44dd41e6c
Signed-off-by: John Kesapides <john.kesapides@arm.com>
Reviewed-on: https://review.mlplatform.org/c/712
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
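The patch routes both public overloads of configure() and validate() through a single templated internal implementation, so callers can now pass vectors of const tensors as well as non-const ones. Below is a minimal usage sketch of the resulting public API; the function name, shapes and fill step are illustrative assumptions, not taken from the patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
#include "arm_compute/runtime/Tensor.h"

#include <vector>

using namespace arm_compute;

void width_concat_example()
{
    Tensor in0{}, in1{}, out{};
    // Illustrative shapes: two 8x4 F32 tensors concatenated along width into a 16x4 output.
    in0.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    in1.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::F32));

    NEWidthConcatenateLayer concat;
    // The overload added by this patch also accepts const input tensors.
    std::vector<const ITensor *> inputs = { &in0, &in1 };
    concat.configure(inputs, &out);

    in0.allocator()->allocate();
    in1.allocator()->allocate();
    out.allocator()->allocate();

    // ... fill in0 and in1 ...
    concat.run();
}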
Diffstat (limited to 'src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp')
-rw-r--r--  src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp  27
1 file changed, 24 insertions, 3 deletions
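The matching header declarations are not part of this diff, which is limited to the .cpp. A plausible sketch of how the templated helpers could be declared is shown below; only the *_internal names and parameter lists come from the .cpp, while the class name, the static qualifier on validate_internal and the enable_if guards are assumptions.

// Hypothetical header excerpt, not the actual NEWidthConcatenateLayer.h.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/ITensorInfo.h"

#include <type_traits>
#include <vector>

using namespace arm_compute;

class WidthConcatenateLayerSketch
{
public:
    void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
    void configure(std::vector<const ITensor *> inputs_vector, ITensor *output);
    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
    static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output);

private:
    // One implementation serves both const and non-const element types;
    // the SFINAE guard restricts it to tensor (info) pointer vectors.
    template <typename TensorType,
              typename = typename std::enable_if<std::is_base_of<ITensor, typename std::remove_cv<TensorType>::type>::value>::type>
    void configure_internal(std::vector<TensorType *> &&inputs_vector, ITensor *output);

    template <typename TensorInfoType,
              typename = typename std::enable_if<std::is_base_of<ITensorInfo, typename std::remove_cv<TensorInfoType>::type>::value>::type>
    static Status validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output);
};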
diff --git a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
index 7e435c34b1..17c352b8f3 100644
--- a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
@@ -40,7 +40,8 @@ NEWidthConcatenateLayer::NEWidthConcatenateLayer()
{
}
-Status NEWidthConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output)
+template <typename TensorInfoType, typename>
+inline Status NEWidthConcatenateLayer::validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
@@ -60,8 +61,8 @@ Status NEWidthConcatenateLayer::validate(const std::vector<ITensorInfo *> &input
return Status{};
}
-
-void NEWidthConcatenateLayer::configure(std::vector<ITensor *> inputs_vector, ITensor *output)
+template <typename TensorType, typename>
+inline void NEWidthConcatenateLayer::configure_internal(std::vector<TensorType *> &&inputs_vector, ITensor *output)
{
_num_inputs = inputs_vector.size();
@@ -87,6 +88,26 @@ void NEWidthConcatenateLayer::configure(std::vector<ITensor *> inputs_vector, IT
}
}
+void NEWidthConcatenateLayer::configure(std::vector<ITensor *> inputs_vector, ITensor *output)
+{
+ configure_internal(std::move(inputs_vector), output);
+}
+
+void NEWidthConcatenateLayer::configure(std::vector<const ITensor *> inputs_vector, ITensor *output)
+{
+ configure_internal(std::move(inputs_vector), output);
+}
+
+Status NEWidthConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output)
+{
+ return validate_internal(inputs_vector, output);
+}
+
+Status NEWidthConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output)
+{
+ return validate_internal(inputs_vector, output);
+}
+
void NEWidthConcatenateLayer::run()
{
for(unsigned i = 0; i < _num_inputs; ++i)