author     Manuel Bottini <manuel.bottini@arm.com>    2018-12-28 15:05:20 +0000
committer  Manuel Bottini <manuel.bottini@arm.com>    2019-01-14 13:53:18 +0000
commit     053e7510f24c2b02f9fae9c45fb6b874631a5376 (patch)
tree       2da0a155512637017fb011a11f8c2f8bab494fa2 /arm_compute
parent     b412fab0e3c8ec10e104f4d85760898a5b26179c (diff)
download   ComputeLibrary-053e7510f24c2b02f9fae9c45fb6b874631a5376.tar.gz
COMPMID-1758: NEON: Implement Range
Change-Id: I56dff9462b85760fbed6db43224cadb90d283810
Reviewed-on: https://review.mlplatform.org/472
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/CL/kernels/CLRangeKernel.h              6
-rw-r--r--  arm_compute/core/NEON/NEKernels.h                        1
-rw-r--r--  arm_compute/core/NEON/kernels/NERangeKernel.h           90
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/getlane.h     270
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h    1
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/setlane.h     208
-rw-r--r--  arm_compute/runtime/CL/functions/CLRange.h               4
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                   1
-rw-r--r--  arm_compute/runtime/NEON/functions/NERange.h            71
9 files changed, 514 insertions, 138 deletions
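
Before the diff itself, a minimal usage sketch of the new NERange function, inferred from the configure() signature added below; the tensor setup and the element count are illustrative assumptions, not part of the commit.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NERange.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Range writes ceil((end - start) / step) values; here (10 - 0) / 2 = 5.
    Tensor output;
    output.allocator()->init(TensorInfo(TensorShape(5U), 1, DataType::F32));

    NERange range;
    range.configure(&output, 0.f, 10.f, 2.f); // start 0, end 10 (exclusive), step 2

    output.allocator()->allocate();
    range.run(); // output now holds { 0, 2, 4, 6, 8 }
    return 0;
}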
diff --git a/arm_compute/core/CL/kernels/CLRangeKernel.h b/arm_compute/core/CL/kernels/CLRangeKernel.h
index 2da21175ce..2349b8ecad 100644
--- a/arm_compute/core/CL/kernels/CLRangeKernel.h
+++ b/arm_compute/core/CL/kernels/CLRangeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,7 +31,7 @@ namespace arm_compute
{
class ICLTensor;
-/** Kernel class for Range()
+/** Kernel class for Range
*
 * Range generates a 1-D tensor containing a sequence of numbers that begins at 'start' and extends by increments
* of 'step' up to but not including 'end'.
@@ -51,7 +51,7 @@ public:
CLRangeKernel &operator=(CLRangeKernel &&) = default;
/** Default destructor */
~CLRangeKernel() = default;
- /** Initialise the kernel's output tensor, start, end and step of the sequence.
+ /** Initialize the kernel's output tensor, start, end and step of the sequence.
*
* @param[out] output Output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] start The starting value of the sequence.
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index a32c507266..a99b7d08e1 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -105,6 +105,7 @@
#include "arm_compute/core/NEON/kernels/NEPriorBoxLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEROIPoolingLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NERangeKernel.h"
#include "arm_compute/core/NEON/kernels/NEReductionOperationKernel.h"
#include "arm_compute/core/NEON/kernels/NERemapKernel.h"
#include "arm_compute/core/NEON/kernels/NEReorgLayerKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NERangeKernel.h b/arm_compute/core/NEON/kernels/NERangeKernel.h
new file mode 100644
index 0000000000..eeacf3f8f5
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NERangeKernel.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NERANGEKERNEL_H__
+#define __ARM_COMPUTE_NERANGEKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Kernel class for Range
+ *
+ * Range generates a 1-D tensor containing a sequence of numbers that begins at 'start' and extends by increments
+ * of 'step' up to but not including 'end'.
+ */
+class NERangeKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NERangeKernel";
+ }
+ /** Default constructor */
+ NERangeKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NERangeKernel(const NERangeKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NERangeKernel &operator=(const NERangeKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NERangeKernel(NERangeKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NERangeKernel &operator=(NERangeKernel &&) = default;
+ /** Default destructor */
+ ~NERangeKernel() = default;
+ /** Initialize the kernel's output tensor, start, end and step of the sequence.
+ *
+ * @param[out] output Output tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (exclusive) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence.
+ */
+ void configure(ITensor *output, float start, float end, float step);
+ /** Static function to check if given info will lead to a valid configuration of @ref NERangeKernel
+ *
+ * @param[in] output Output tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (exclusive) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *output, float start, float end, float step);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ using RangeFunction = void(ITensor *output, float start, float step, const Window &window);
+
+ RangeFunction *_func; /**< Range function to be called */
+ float _start; /**< Start of sequence */
+ float _end; /**< End of sequence */
+ float _step; /**< Increment/step value */
+ ITensor *_output; /**< Destination tensor */
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NERANGEKERNEL_H__ */
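
The doc-comment above fixes the semantics of the sequence; as a hedged sketch (this helper is illustrative, not the kernel's validate() implementation), the element-count arithmetic it implies is:

#include <cmath>
#include <cstddef>

// Number of values in [start, end) taken in increments of step.
// Assumes step != 0 and that (end - start) and step share a sign;
// a real validate() would have to reject everything else.
inline std::size_t range_num_elements(float start, float end, float step)
{
    return static_cast<std::size_t>(std::ceil((end - start) / step));
}
// e.g. range_num_elements(0.f, 10.f, 3.f) == 4, for the sequence { 0, 3, 6, 9 }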
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/getlane.h b/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
index 107ce44e0c..68267ba92a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,62 +30,62 @@ namespace arm_compute
{
namespace wrapper
{
-#define VGETLANE_IMPL_8(stype, vtype, postfix) \
- inline stype vgetlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vget_lane_##postfix(vector, 0); \
- case 1: \
- return vget_lane_##postfix(vector, 1); \
- case 2: \
- return vget_lane_##postfix(vector, 2); \
- case 3: \
- return vget_lane_##postfix(vector, 3); \
- case 4: \
- return vget_lane_##postfix(vector, 4); \
- case 5: \
- return vget_lane_##postfix(vector, 5); \
- case 6: \
- return vget_lane_##postfix(vector, 6); \
- case 7: \
- return vget_lane_##postfix(vector, 7); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETLANE_IMPL_8(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ case 2: \
+ return vget_lane_##postfix(vector, 2); \
+ case 3: \
+ return vget_lane_##postfix(vector, 3); \
+ case 4: \
+ return vget_lane_##postfix(vector, 4); \
+ case 5: \
+ return vget_lane_##postfix(vector, 5); \
+ case 6: \
+ return vget_lane_##postfix(vector, 6); \
+ case 7: \
+ return vget_lane_##postfix(vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETLANE_IMPL_4(stype, vtype, postfix) \
- inline stype vgetlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vget_lane_##postfix(vector, 0); \
- case 1: \
- return vget_lane_##postfix(vector, 1); \
- case 2: \
- return vget_lane_##postfix(vector, 2); \
- case 3: \
- return vget_lane_##postfix(vector, 3); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETLANE_IMPL_4(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ case 2: \
+ return vget_lane_##postfix(vector, 2); \
+ case 3: \
+ return vget_lane_##postfix(vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETLANE_IMPL_2(stype, vtype, postfix) \
- inline stype vgetlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vget_lane_##postfix(vector, 0); \
- case 1: \
- return vget_lane_##postfix(vector, 1); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETLANE_IMPL_2(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
VGETLANE_IMPL_8(uint8_t, uint8x8_t, u8)
@@ -99,90 +99,90 @@ VGETLANE_IMPL_2(float, float32x2_t, f32)
VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-#define VGETQLANE_IMPL_16(stype, vtype, postfix) \
- inline stype vgetqlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vgetq_lane_##postfix(vector, 0); \
- case 1: \
- return vgetq_lane_##postfix(vector, 1); \
- case 2: \
- return vgetq_lane_##postfix(vector, 2); \
- case 3: \
- return vgetq_lane_##postfix(vector, 3); \
- case 4: \
- return vgetq_lane_##postfix(vector, 4); \
- case 5: \
- return vgetq_lane_##postfix(vector, 5); \
- case 6: \
- return vgetq_lane_##postfix(vector, 6); \
- case 7: \
- return vgetq_lane_##postfix(vector, 7); \
- case 8: \
- return vgetq_lane_##postfix(vector, 8); \
- case 9: \
- return vgetq_lane_##postfix(vector, 9); \
- case 10: \
- return vgetq_lane_##postfix(vector, 10); \
- case 11: \
- return vgetq_lane_##postfix(vector, 11); \
- case 12: \
- return vgetq_lane_##postfix(vector, 12); \
- case 13: \
- return vgetq_lane_##postfix(vector, 13); \
- case 14: \
- return vgetq_lane_##postfix(vector, 14); \
- case 15: \
- return vgetq_lane_##postfix(vector, 15); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETQLANE_IMPL_16(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ case 4: \
+ return vgetq_lane_##postfix(vector, 4); \
+ case 5: \
+ return vgetq_lane_##postfix(vector, 5); \
+ case 6: \
+ return vgetq_lane_##postfix(vector, 6); \
+ case 7: \
+ return vgetq_lane_##postfix(vector, 7); \
+ case 8: \
+ return vgetq_lane_##postfix(vector, 8); \
+ case 9: \
+ return vgetq_lane_##postfix(vector, 9); \
+ case 10: \
+ return vgetq_lane_##postfix(vector, 10); \
+ case 11: \
+ return vgetq_lane_##postfix(vector, 11); \
+ case 12: \
+ return vgetq_lane_##postfix(vector, 12); \
+ case 13: \
+ return vgetq_lane_##postfix(vector, 13); \
+ case 14: \
+ return vgetq_lane_##postfix(vector, 14); \
+ case 15: \
+ return vgetq_lane_##postfix(vector, 15); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETQLANE_IMPL_8(stype, vtype, postfix) \
- inline stype vgetqlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vgetq_lane_##postfix(vector, 0); \
- case 1: \
- return vgetq_lane_##postfix(vector, 1); \
- case 2: \
- return vgetq_lane_##postfix(vector, 2); \
- case 3: \
- return vgetq_lane_##postfix(vector, 3); \
- case 4: \
- return vgetq_lane_##postfix(vector, 4); \
- case 5: \
- return vgetq_lane_##postfix(vector, 5); \
- case 6: \
- return vgetq_lane_##postfix(vector, 6); \
- case 7: \
- return vgetq_lane_##postfix(vector, 7); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETQLANE_IMPL_8(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ case 4: \
+ return vgetq_lane_##postfix(vector, 4); \
+ case 5: \
+ return vgetq_lane_##postfix(vector, 5); \
+ case 6: \
+ return vgetq_lane_##postfix(vector, 6); \
+ case 7: \
+ return vgetq_lane_##postfix(vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETQLANE_IMPL_4(stype, vtype, postfix) \
- inline stype vgetqlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vgetq_lane_##postfix(vector, 0); \
- case 1: \
- return vgetq_lane_##postfix(vector, 1); \
- case 2: \
- return vgetq_lane_##postfix(vector, 2); \
- case 3: \
- return vgetq_lane_##postfix(vector, 3); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETQLANE_IMPL_4(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
VGETQLANE_IMPL_16(uint8_t, uint8x16_t, u8)
@@ -199,6 +199,10 @@ VGETQLANE_IMPL_8(float16_t, float16x8_t, f16)
#undef VGETLANE_IMPL_8
#undef VGETLANE_IMPL_4
#undef VGETLANE_IMPL_2
+
+#undef VGETQLANE_IMPL_16
+#undef VGETQLANE_IMPL_8
+#undef VGETQLANE_IMPL_4
} // namespace wrapper
} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_GET_LANE_H__ */
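
A note on why these switch ladders exist at all: the underlying vget_lane_*/vgetq_lane_* intrinsics require the lane index to be a compile-time constant, so the wrapper spells out every case in order to accept a runtime index. A small usage sketch of the unified name after this change (sum_lanes is illustrative, not a library function):

#include <arm_neon.h>

#include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"

// With this patch the 128-bit overloads are also spelled vgetlane, so one
// name covers uint8x8_t, float32x4_t and the rest of the supported types.
float sum_lanes(float32x4_t v)
{
    float acc = 0.f;
    for(unsigned int lane = 0; lane < 4; ++lane)
    {
        acc += arm_compute::wrapper::vgetlane(v, lane);
    }
    return acc;
}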
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 97af983e62..896e5106ab 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -52,6 +52,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/pmin.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/rev64.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/setlane.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/sub.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/setlane.h b/arm_compute/core/NEON/wrapper/intrinsics/setlane.h
new file mode 100644
index 0000000000..4eba1490c3
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/setlane.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_SET_LANE_H__
+#define __ARM_COMPUTE_WRAPPER_SET_LANE_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSETLANE_IMPL_8(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vset_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vset_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vset_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vset_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vset_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vset_lane_##postfix(value, vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETLANE_IMPL_4(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vset_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vset_lane_##postfix(value, vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETLANE_IMPL_2(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VSETLANE_IMPL_8(uint8x8_t, uint8_t, uint8x8_t, u8)
+VSETLANE_IMPL_8(int8x8_t, int8_t, int8x8_t, s8)
+VSETLANE_IMPL_4(uint16x4_t, uint16_t, uint16x4_t, u16)
+VSETLANE_IMPL_4(int16x4_t, int16_t, int16x4_t, s16)
+VSETLANE_IMPL_2(uint32x2_t, uint32_t, uint32x2_t, u32)
+VSETLANE_IMPL_2(int32x2_t, int32_t, int32x2_t, s32)
+VSETLANE_IMPL_2(float32x2_t, float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETLANE_IMPL_4(float16x4_t, float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VSETQLANE_IMPL_16(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vsetq_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vsetq_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vsetq_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vsetq_lane_##postfix(value, vector, 7); \
+ case 8: \
+ return vsetq_lane_##postfix(value, vector, 8); \
+ case 9: \
+ return vsetq_lane_##postfix(value, vector, 9); \
+ case 10: \
+ return vsetq_lane_##postfix(value, vector, 10); \
+ case 11: \
+ return vsetq_lane_##postfix(value, vector, 11); \
+ case 12: \
+ return vsetq_lane_##postfix(value, vector, 12); \
+ case 13: \
+ return vsetq_lane_##postfix(value, vector, 13); \
+ case 14: \
+ return vsetq_lane_##postfix(value, vector, 14); \
+ case 15: \
+ return vsetq_lane_##postfix(value, vector, 15); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETQLANE_IMPL_8(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vsetq_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vsetq_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vsetq_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vsetq_lane_##postfix(value, vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETQLANE_IMPL_4(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VSETQLANE_IMPL_16(uint8x16_t, uint8_t, uint8x16_t, u8)
+VSETQLANE_IMPL_16(int8x16_t, int8_t, int8x16_t, s8)
+VSETQLANE_IMPL_8(uint16x8_t, uint16_t, uint16x8_t, u16)
+VSETQLANE_IMPL_8(int16x8_t, int16_t, int16x8_t, s16)
+VSETQLANE_IMPL_4(uint32x4_t, uint32_t, uint32x4_t, u32)
+VSETQLANE_IMPL_4(int32x4_t, int32_t, int32x4_t, s32)
+VSETQLANE_IMPL_4(float32x4_t, float, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETQLANE_IMPL_8(float16x8_t, float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSETLANE_IMPL_8
+#undef VSETLANE_IMPL_4
+#undef VSETLANE_IMPL_2
+
+#undef VSETQLANE_IMPL_16
+#undef VSETQLANE_IMPL_8
+#undef VSETQLANE_IMPL_4
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_SET_LANE_H__ */
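
A companion sketch for the new vsetlane overloads: a runtime-indexed lane write, plausibly the building block a range kernel needs to assemble { start, start + step, ... } vectors. The helper itself is an assumption, not the kernel's code:

#include <arm_neon.h>

#include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"

// Fill a float vector with { start, start + step, start + 2*step, start + 3*step }.
float32x4_t fill_ramp(float start, float step)
{
    float32x4_t v = vdupq_n_f32(0.f);
    for(unsigned int lane = 0; lane < 4; ++lane)
    {
        v = arm_compute::wrapper::vsetlane(start + step * lane, v, lane);
    }
    return v;
}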
diff --git a/arm_compute/runtime/CL/functions/CLRange.h b/arm_compute/runtime/CL/functions/CLRange.h
index 2614534f14..904e6ad20f 100644
--- a/arm_compute/runtime/CL/functions/CLRange.h
+++ b/arm_compute/runtime/CL/functions/CLRange.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,7 @@ class ICLTensor;
class CLRange : public ICLSimpleFunction
{
public:
- /** Initialise the kernel's start, end, step and output tensor.
+ /** Initialize the kernel's start, end, step and output tensor.
*
* @param[out] output Output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] start The starting value of the sequence.
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index da61853785..ceebc7b844 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -107,6 +107,7 @@
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NERNNLayer.h"
#include "arm_compute/runtime/NEON/functions/NEROIPoolingLayer.h"
+#include "arm_compute/runtime/NEON/functions/NERange.h"
#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/NEON/functions/NERemap.h"
diff --git a/arm_compute/runtime/NEON/functions/NERange.h b/arm_compute/runtime/NEON/functions/NERange.h
new file mode 100644
index 0000000000..d888c5106d
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NERange.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NERANGE_H__
+#define __ARM_COMPUTE_NERANGE_H__
+
+#include "arm_compute/core/NEON/kernels/NERangeKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to run @ref NERangeKernel
+ *
+ * @note The tensor data type for the output must be U8/S8/U16/S16/U32/S32/F16/F32.
+ * @note The function generates a sequence with the given start, end and step.
+ */
+class NERange : public IFunction
+{
+public:
+ /** Default constructor */
+ NERange();
+ /** Initialize the kernel's start, end, step and output tensor.
+ *
+ * @param[out] output Output tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (exclusive) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence. Default is 1.
+ */
+ void configure(ITensor *output, float start, float end, float step = 1.f);
+ /** Static function to check if given info will lead to a valid configuration of @ref NERange
+ *
+ * @param[in] output Output tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (exclusive) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence. Default is 1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *output, float start, float end, float step = 1.f);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ NERangeKernel _kernel;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NERANGE_H__ */
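
To close, a hedged sketch of the static validate() path declared above, checking a configuration before anything is allocated; the TensorInfo parameters are assumptions:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NERange.h"

using namespace arm_compute;

bool range_config_is_valid()
{
    // 5 elements of S32, enough for the sequence { 0, 2, 4, 6, 8 }.
    TensorInfo info(TensorShape(5U), 1, DataType::S32);
    // Status converts to true on success; a zero step or an output that
    // cannot hold the sequence should come back as an error status.
    return bool(NERange::validate(&info, 0.f, 10.f, 2.f));
}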