aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorManuel Bottini <manuel.bottini@arm.com>2018-12-28 15:05:20 +0000
committerManuel Bottini <manuel.bottini@arm.com>2019-01-14 13:53:18 +0000
commit053e7510f24c2b02f9fae9c45fb6b874631a5376 (patch)
tree2da0a155512637017fb011a11f8c2f8bab494fa2
parentb412fab0e3c8ec10e104f4d85760898a5b26179c (diff)
downloadComputeLibrary-053e7510f24c2b02f9fae9c45fb6b874631a5376.tar.gz
COMPMID-1758: NEON: Implement Range
Change-Id: I56dff9462b85760fbed6db43224cadb90d283810 Reviewed-on: https://review.mlplatform.org/472 Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--arm_compute/core/CL/kernels/CLRangeKernel.h6
-rw-r--r--arm_compute/core/NEON/NEKernels.h1
-rw-r--r--arm_compute/core/NEON/kernels/NERangeKernel.h90
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/getlane.h270
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h1
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/setlane.h208
-rw-r--r--arm_compute/runtime/CL/functions/CLRange.h4
-rw-r--r--arm_compute/runtime/NEON/NEFunctions.h1
-rw-r--r--arm_compute/runtime/NEON/functions/NERange.h71
-rw-r--r--src/core/CL/kernels/CLRangeKernel.cpp14
-rw-r--r--src/core/NEON/kernels/NERangeKernel.cpp183
-rw-r--r--src/runtime/NEON/functions/NERange.cpp49
-rw-r--r--tests/validation/NEON/Range.cpp178
-rw-r--r--utils/Utils.h16
14 files changed, 943 insertions, 149 deletions
diff --git a/arm_compute/core/CL/kernels/CLRangeKernel.h b/arm_compute/core/CL/kernels/CLRangeKernel.h
index 2da21175ce..2349b8ecad 100644
--- a/arm_compute/core/CL/kernels/CLRangeKernel.h
+++ b/arm_compute/core/CL/kernels/CLRangeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,7 +31,7 @@ namespace arm_compute
{
class ICLTensor;
-/** Kernel class for Range()
+/** Kernel class for Range
*
* range generates a 1-D tensor containing a sequence of numbers that begins at 'start' and extends by increments
* of 'step' up to but not including 'end'.
@@ -51,7 +51,7 @@ public:
CLRangeKernel &operator=(CLRangeKernel &&) = default;
/** Default destructor */
~CLRangeKernel() = default;
- /** Initialise the kernel's output tensor, start, end and step of the sequence.
+ /** Initialize the kernel's output tensor, start, end and step of the sequence.
*
* @param[out] output Output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] start The starting value of the sequence.
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index a32c507266..a99b7d08e1 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -105,6 +105,7 @@
#include "arm_compute/core/NEON/kernels/NEPriorBoxLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEROIPoolingLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NERangeKernel.h"
#include "arm_compute/core/NEON/kernels/NEReductionOperationKernel.h"
#include "arm_compute/core/NEON/kernels/NERemapKernel.h"
#include "arm_compute/core/NEON/kernels/NEReorgLayerKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NERangeKernel.h b/arm_compute/core/NEON/kernels/NERangeKernel.h
new file mode 100644
index 0000000000..eeacf3f8f5
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NERangeKernel.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NERANGEKERNEL_H__
+#define __ARM_COMPUTE_NERANGEKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Kernel class for Range
+ *
+ * range generates a 1-D tensor containing a sequence of numbers that begins at 'start' and extends by increments
+ * of 'step' up to but not including 'end'.
+ */
+class NERangeKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NERangeKernel";
+ }
+ /** Default constructor */
+ NERangeKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NERangeKernel(const NERangeKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NERangeKernel &operator=(const NERangeKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NERangeKernel(NERangeKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NERangeKernel &operator=(NERangeKernel &&) = default;
+ /** Default destructor */
+ ~NERangeKernel() = default;
+ /** Initialize the kernel's output tensor, start, end and step of the sequence.
+ *
+ * @param[out] output Output tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (not including) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence.
+ */
+ void configure(ITensor *output, float start, float end, float step);
+ /** Static function to check if given info will lead to a valid configuration of @ref NERangeKernel
+ *
+ * @param[in] output Output tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (not including) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *output, float start, float end, float step);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ using RangeFunction = void(ITensor *output, float start, float step, const Window &window);
+
+ RangeFunction *_func; /**< Range function to be called */
+ float _start; /**< Start of sequence */
+ float _end; /**< End of sequence */
+ float _step; /**< Increment/step value */
+ ITensor *_output; /**< Destination tensor */
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NERANGEKERNEL_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/getlane.h b/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
index 107ce44e0c..68267ba92a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,62 +30,62 @@ namespace arm_compute
{
namespace wrapper
{
-#define VGETLANE_IMPL_8(stype, vtype, postfix) \
- inline stype vgetlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vget_lane_##postfix(vector, 0); \
- case 1: \
- return vget_lane_##postfix(vector, 1); \
- case 2: \
- return vget_lane_##postfix(vector, 2); \
- case 3: \
- return vget_lane_##postfix(vector, 3); \
- case 4: \
- return vget_lane_##postfix(vector, 4); \
- case 5: \
- return vget_lane_##postfix(vector, 5); \
- case 6: \
- return vget_lane_##postfix(vector, 6); \
- case 7: \
- return vget_lane_##postfix(vector, 7); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETLANE_IMPL_8(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ case 2: \
+ return vget_lane_##postfix(vector, 2); \
+ case 3: \
+ return vget_lane_##postfix(vector, 3); \
+ case 4: \
+ return vget_lane_##postfix(vector, 4); \
+ case 5: \
+ return vget_lane_##postfix(vector, 5); \
+ case 6: \
+ return vget_lane_##postfix(vector, 6); \
+ case 7: \
+ return vget_lane_##postfix(vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETLANE_IMPL_4(stype, vtype, postfix) \
- inline stype vgetlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vget_lane_##postfix(vector, 0); \
- case 1: \
- return vget_lane_##postfix(vector, 1); \
- case 2: \
- return vget_lane_##postfix(vector, 2); \
- case 3: \
- return vget_lane_##postfix(vector, 3); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETLANE_IMPL_4(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ case 2: \
+ return vget_lane_##postfix(vector, 2); \
+ case 3: \
+ return vget_lane_##postfix(vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETLANE_IMPL_2(stype, vtype, postfix) \
- inline stype vgetlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vget_lane_##postfix(vector, 0); \
- case 1: \
- return vget_lane_##postfix(vector, 1); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETLANE_IMPL_2(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
VGETLANE_IMPL_8(uint8_t, uint8x8_t, u8)
@@ -99,90 +99,90 @@ VGETLANE_IMPL_2(float, float32x2_t, f32)
VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-#define VGETQLANE_IMPL_16(stype, vtype, postfix) \
- inline stype vgetqlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vgetq_lane_##postfix(vector, 0); \
- case 1: \
- return vgetq_lane_##postfix(vector, 1); \
- case 2: \
- return vgetq_lane_##postfix(vector, 2); \
- case 3: \
- return vgetq_lane_##postfix(vector, 3); \
- case 4: \
- return vgetq_lane_##postfix(vector, 4); \
- case 5: \
- return vgetq_lane_##postfix(vector, 5); \
- case 6: \
- return vgetq_lane_##postfix(vector, 6); \
- case 7: \
- return vgetq_lane_##postfix(vector, 7); \
- case 8: \
- return vgetq_lane_##postfix(vector, 8); \
- case 9: \
- return vgetq_lane_##postfix(vector, 9); \
- case 10: \
- return vgetq_lane_##postfix(vector, 10); \
- case 11: \
- return vgetq_lane_##postfix(vector, 11); \
- case 12: \
- return vgetq_lane_##postfix(vector, 12); \
- case 13: \
- return vgetq_lane_##postfix(vector, 13); \
- case 14: \
- return vgetq_lane_##postfix(vector, 14); \
- case 15: \
- return vgetq_lane_##postfix(vector, 15); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETQLANE_IMPL_16(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ case 4: \
+ return vgetq_lane_##postfix(vector, 4); \
+ case 5: \
+ return vgetq_lane_##postfix(vector, 5); \
+ case 6: \
+ return vgetq_lane_##postfix(vector, 6); \
+ case 7: \
+ return vgetq_lane_##postfix(vector, 7); \
+ case 8: \
+ return vgetq_lane_##postfix(vector, 8); \
+ case 9: \
+ return vgetq_lane_##postfix(vector, 9); \
+ case 10: \
+ return vgetq_lane_##postfix(vector, 10); \
+ case 11: \
+ return vgetq_lane_##postfix(vector, 11); \
+ case 12: \
+ return vgetq_lane_##postfix(vector, 12); \
+ case 13: \
+ return vgetq_lane_##postfix(vector, 13); \
+ case 14: \
+ return vgetq_lane_##postfix(vector, 14); \
+ case 15: \
+ return vgetq_lane_##postfix(vector, 15); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETQLANE_IMPL_8(stype, vtype, postfix) \
- inline stype vgetqlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vgetq_lane_##postfix(vector, 0); \
- case 1: \
- return vgetq_lane_##postfix(vector, 1); \
- case 2: \
- return vgetq_lane_##postfix(vector, 2); \
- case 3: \
- return vgetq_lane_##postfix(vector, 3); \
- case 4: \
- return vgetq_lane_##postfix(vector, 4); \
- case 5: \
- return vgetq_lane_##postfix(vector, 5); \
- case 6: \
- return vgetq_lane_##postfix(vector, 6); \
- case 7: \
- return vgetq_lane_##postfix(vector, 7); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETQLANE_IMPL_8(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ case 4: \
+ return vgetq_lane_##postfix(vector, 4); \
+ case 5: \
+ return vgetq_lane_##postfix(vector, 5); \
+ case 6: \
+ return vgetq_lane_##postfix(vector, 6); \
+ case 7: \
+ return vgetq_lane_##postfix(vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
-#define VGETQLANE_IMPL_4(stype, vtype, postfix) \
- inline stype vgetqlane(const vtype vector, const int lane) \
- { \
- switch(lane) \
- { \
- case 0: \
- return vgetq_lane_##postfix(vector, 0); \
- case 1: \
- return vgetq_lane_##postfix(vector, 1); \
- case 2: \
- return vgetq_lane_##postfix(vector, 2); \
- case 3: \
- return vgetq_lane_##postfix(vector, 3); \
- default: \
- ARM_COMPUTE_ERROR("Invalid lane"); \
- } \
+#define VGETQLANE_IMPL_4(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
}
VGETQLANE_IMPL_16(uint8_t, uint8x16_t, u8)
@@ -199,6 +199,10 @@ VGETQLANE_IMPL_8(float16_t, float16x8_t, f16)
#undef VGETLANE_IMPL_8
#undef VGETLANE_IMPL_4
#undef VGETLANE_IMPL_2
+
+#undef VGETQLANE_IMPL_16
+#undef VGETQLANE_IMPL_8
+#undef VGETQLANE_IMPL_4
} // namespace wrapper
} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_GET_LANE_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 97af983e62..896e5106ab 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -52,6 +52,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/pmin.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/rev64.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/setlane.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/sub.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/setlane.h b/arm_compute/core/NEON/wrapper/intrinsics/setlane.h
new file mode 100644
index 0000000000..4eba1490c3
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/setlane.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_SET_LANE_H__
+#define __ARM_COMPUTE_WRAPPER_SET_LANE_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSETLANE_IMPL_8(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vset_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vset_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vset_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vset_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vset_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vset_lane_##postfix(value, vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETLANE_IMPL_4(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vset_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vset_lane_##postfix(value, vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETLANE_IMPL_2(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VSETLANE_IMPL_8(uint8x8_t, uint8_t, uint8x8_t, u8)
+VSETLANE_IMPL_8(int8x8_t, int8_t, int8x8_t, s8)
+VSETLANE_IMPL_4(uint16x4_t, uint16_t, uint16x4_t, u16)
+VSETLANE_IMPL_4(int16x4_t, int16_t, int16x4_t, s16)
+VSETLANE_IMPL_2(uint32x2_t, uint32_t, uint32x2_t, u32)
+VSETLANE_IMPL_2(int32x2_t, int32_t, int32x2_t, s32)
+VSETLANE_IMPL_2(float32x2_t, float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETLANE_IMPL_4(float16x4_t, float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VSETQLANE_IMPL_16(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vsetq_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vsetq_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vsetq_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vsetq_lane_##postfix(value, vector, 7); \
+ case 8: \
+ return vsetq_lane_##postfix(value, vector, 8); \
+ case 9: \
+ return vsetq_lane_##postfix(value, vector, 9); \
+ case 10: \
+ return vsetq_lane_##postfix(value, vector, 10); \
+ case 11: \
+ return vsetq_lane_##postfix(value, vector, 11); \
+ case 12: \
+ return vsetq_lane_##postfix(value, vector, 12); \
+ case 13: \
+ return vsetq_lane_##postfix(value, vector, 13); \
+ case 14: \
+ return vsetq_lane_##postfix(value, vector, 14); \
+ case 15: \
+ return vsetq_lane_##postfix(value, vector, 15); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETQLANE_IMPL_8(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vsetq_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vsetq_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vsetq_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vsetq_lane_##postfix(value, vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETQLANE_IMPL_4(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch(lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VSETQLANE_IMPL_16(uint8x16_t, uint8_t, uint8x16_t, u8)
+VSETQLANE_IMPL_16(int8x16_t, int8_t, int8x16_t, s8)
+VSETQLANE_IMPL_8(uint16x8_t, uint16_t, uint16x8_t, u16)
+VSETQLANE_IMPL_8(int16x8_t, int16_t, int16x8_t, s16)
+VSETQLANE_IMPL_4(uint32x4_t, uint32_t, uint32x4_t, u32)
+VSETQLANE_IMPL_4(int32x4_t, int32_t, int32x4_t, s32)
+VSETQLANE_IMPL_4(float32x4_t, float, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETQLANE_IMPL_8(float16x8_t, float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSETLANE_IMPL_8
+#undef VSETLANE_IMPL_4
+#undef VSETLANE_IMPL_2
+
+#undef VSETQLANE_IMPL_16
+#undef VSETQLANE_IMPL_8
+#undef VSETQLANE_IMPL_4
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_SET_LANE_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLRange.h b/arm_compute/runtime/CL/functions/CLRange.h
index 2614534f14..904e6ad20f 100644
--- a/arm_compute/runtime/CL/functions/CLRange.h
+++ b/arm_compute/runtime/CL/functions/CLRange.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,7 @@ class ICLTensor;
class CLRange : public ICLSimpleFunction
{
public:
- /** Initialise the kernel's start, end, step and output tensor.
+ /** Initialize the kernel's start, end, step and output tensor.
*
* @param[out] output Output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] start The starting value of the sequence.
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index da61853785..ceebc7b844 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -107,6 +107,7 @@
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NERNNLayer.h"
#include "arm_compute/runtime/NEON/functions/NEROIPoolingLayer.h"
+#include "arm_compute/runtime/NEON/functions/NERange.h"
#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/NEON/functions/NERemap.h"
diff --git a/arm_compute/runtime/NEON/functions/NERange.h b/arm_compute/runtime/NEON/functions/NERange.h
new file mode 100644
index 0000000000..d888c5106d
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NERange.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NERANGE_H__
+#define __ARM_COMPUTE_NERANGE_H__
+
+#include "arm_compute/core/NEON/kernels/NERangeKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to run @ref NERangeKernel
+ *
+ * @note The tensor data type for the output must be U8/S8/U16/S16/U32/S32/F16/F32.
+ * @note The function generates a sequence with the given start, end and step.
+ */
+class NERange : public IFunction
+{
+public:
+ /** Default constructor */
+ NERange();
+ /** Initialize the kernel's start, end, step and output tensor.
+ *
+ * @param[out] output Output tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (not including) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence. Default is 1.
+ */
+ void configure(ITensor *output, float start, float end, float step = 1.f);
+ /** Static function to check if given info will lead to a valid configuration of @ref NERange
+ *
+ * @param[in] output Output tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+ * @param[in] start The starting value of the sequence.
+ * @param[in] end The ending (not including) value of the sequence.
+ * @param[in] step The gap between each pair of values in the sequence. Default is 1.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *output, float start, float end, float step = 1.f);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ NERangeKernel _kernel;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NERANGE_H__ */
diff --git a/src/core/CL/kernels/CLRangeKernel.cpp b/src/core/CL/kernels/CLRangeKernel.cpp
index f53fe87a70..ae8cc0fae5 100644
--- a/src/core/CL/kernels/CLRangeKernel.cpp
+++ b/src/core/CL/kernels/CLRangeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,12 +32,6 @@ using namespace arm_compute;
namespace
{
-size_t num_of_elements_in_range(const float start, const float end, const float step)
-{
- ARM_COMPUTE_ERROR_ON_MSG(step == 0, "CLRange Step cannot be 0");
- return size_t(std::ceil((end - start) / step));
-}
-
unsigned int get_num_elems_processed_per_iteration(const DataType dt)
{
unsigned int num_elems_processed_per_iteration = preferred_vector_width(CLKernelLibrary::get().get_device(), dt);
@@ -69,7 +63,7 @@ Status validate_arguments(const ITensorInfo &output, const float start, const fl
ARM_COMPUTE_RETURN_ERROR_ON_MSG((start == end), "start of the requested sequence must not be equal to the end");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(output.num_dimensions() != 1, "Output has to be a 1-D tensor");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output.tensor_shape().total_size() < num_of_elements_in_range(start, end, step), "Output tensor size is incorrect");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output.tensor_shape().total_size() < utils::num_of_elements_in_range(start, end, step), "Output tensor size is incorrect");
return Status{};
}
@@ -78,14 +72,14 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo &output, con
{
unsigned int num_elems_processed_per_iteration = get_num_elems_processed_per_iteration(output.data_type());
// Auto initialize output if not initialized
- auto_init_if_empty(output, TensorShape(num_of_elements_in_range(start, end, step)), 1, output.data_type(), output.quantization_info());
+ auto_init_if_empty(output, TensorShape(utils::num_of_elements_in_range(start, end, step)), 1, output.data_type(), output.quantization_info());
// Configure kernel window
Window win = calculate_max_window(output, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal output_access(&output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), TensorShape(num_of_elements_in_range(start, end, step))));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), TensorShape(utils::num_of_elements_in_range(start, end, step))));
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
diff --git a/src/core/NEON/kernels/NERangeKernel.cpp b/src/core/NEON/kernels/NERangeKernel.cpp
new file mode 100644
index 0000000000..627ce422bd
--- /dev/null
+++ b/src/core/NEON/kernels/NERangeKernel.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NERangeKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NEAsymm.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Validate.h"
+
+#include "utils/Utils.h"
+
+namespace arm_compute
+{
+namespace
+{
+template <typename T>
+void range_function(ITensor *output, float start, float step, const Window &window)
+{
+ const unsigned int num_elems_processed_per_iteration = 16 / sizeof(T);
+ /** NEON vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_bitvector<T, wrapper::traits::BitWidth::W128>::tag_type;
+
+ const auto step_vec = wrapper::vdup_n(static_cast<T>(step), ExactTagType{});
+ const auto start_vec = wrapper::vdup_n(static_cast<T>(start), ExactTagType{});
+ auto id_vec = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+
+ Iterator output_it(output, window);
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ for(unsigned int count = 0; count < num_elems_processed_per_iteration; ++count)
+ {
+ id_vec = wrapper::vsetlane(static_cast<T>(id.x() + count), id_vec, count);
+ }
+ // start + step * id
+ const auto res_vec = wrapper::vmla(start_vec, id_vec, step_vec);
+ const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());
+ wrapper::vstore(out_ptr, res_vec);
+ },
+ output_it);
+}
+
+Status validate_arguments(const ITensorInfo &output, const float start, const float end, const float step)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output,
+ 1,
+ DataType::U8, DataType::S8,
+ DataType::U16, DataType::S16,
+ DataType::U32, DataType::S32,
+ DataType::F16, DataType::F32);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((start == end), "start of the requested sequence must not be equal to the end");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(((start < end) && (step <= 0)), "step must be greater than 0 when start < end");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(((start > end) && (step >= 0)), "step must be less than 0 when start > end");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!utils::check_value_range(start, output.data_type(), output.quantization_info()), "start value is outside the range of the data type");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!utils::check_value_range(end, output.data_type(), output.quantization_info()), "end value is outside the range of the data type");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!utils::check_value_range(step, output.data_type(), output.quantization_info()), "step value is outside the range of the data type");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((start == end), "start of the requested sequence must not be equal to the end");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output.num_dimensions() != 1, "Output has to be a 1-D tensor");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output.tensor_shape().total_size() < utils::num_of_elements_in_range(start, end, step), "Output tensor size is incorrect");
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo &output, const float start, const float end, const float step)
+{
+ const unsigned int num_elems_processed_per_iteration = 16 / output.element_size();
+
+ // Auto initialize output if not initialized
+ auto_init_if_empty(output, TensorShape(utils::num_of_elements_in_range(start, end, step)), 1, output.data_type(), output.quantization_info());
+
+ // Configure kernel window
+ Window win = calculate_max_window(output, Steps(num_elems_processed_per_iteration));
+ AccessWindowHorizontal output_access(&output, 0, num_elems_processed_per_iteration);
+ bool window_changed = update_window_and_padding(win, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), TensorShape(utils::num_of_elements_in_range(start, end, step))));
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+} // namespace
+
// Construct in an unconfigured state: _func stays nullptr until configure()
// selects the data-type-specialized implementation, and run() asserts on it.
NERangeKernel::NERangeKernel()
    : _func(nullptr), _start(0), _end(1), _step(1), _output(nullptr)
{
}
+
+void NERangeKernel::configure(ITensor *output, float start, float end, float step)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*(output->info()), start, end, step));
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(*(output->info()), start, end, step);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+
+ _start = start;
+ _end = end;
+ _step = step;
+ _output = output;
+ switch(_output->info()->data_type())
+ {
+ case DataType::U8:
+ _func = &range_function<uint8_t>;
+ break;
+ case DataType::U16:
+ _func = &range_function<uint16_t>;
+ break;
+ case DataType::U32:
+ _func = &range_function<uint32_t>;
+ break;
+ case DataType::S8:
+ _func = &range_function<int8_t>;
+ break;
+ case DataType::S16:
+ _func = &range_function<int16_t>;
+ break;
+ case DataType::S32:
+ _func = &range_function<int32_t>;
+ break;
+ case DataType::F32:
+ _func = &range_function<float>;
+ break;
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ case DataType::F16:
+ _func = &range_function<float16_t>;
+ break;
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type.");
+ break;
+ }
+
+ INEKernel::configure(win_config.second);
+}
+
+Status NERangeKernel::validate(const ITensorInfo *output, float start, float end, float step)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*output, start, end, step));
+ ARM_COMPUTE_RETURN_ON_ERROR((validate_and_configure_window(*(output->clone()), start, end, step)).first);
+
+ return Status{};
+}
+
// Execute the kernel on the given (sub)window of the configured window.
void NERangeKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    // Dispatch to the data-type-specialized function chosen in configure().
    // _end is not needed here: the window already bounds how much is written.
    (*_func)(_output, _start, _step, window);
}
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/runtime/NEON/functions/NERange.cpp b/src/runtime/NEON/functions/NERange.cpp
new file mode 100644
index 0000000000..977d502286
--- /dev/null
+++ b/src/runtime/NEON/functions/NERange.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NERange.h"
+
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+namespace arm_compute
+{
// NERange is a thin function wrapper that owns a single NERangeKernel.
NERange::NERange()
    : _kernel()
{
}
+
// Configure the underlying kernel; all argument validation happens inside
// the kernel's configure().
void NERange::configure(ITensor *output, const float start, const float end, const float step)
{
    _kernel.configure(output, start, end, step);
}
+
// Static check only: forwards to the kernel's validate without creating state.
Status NERange::validate(const ITensorInfo *output, const float start, const float end, const float step)
{
    return NERangeKernel::validate(output, start, end, step);
}
+
// Range output is 1-D, so the scheduler splits work across threads along DimX.
void NERange::run()
{
    NEScheduler::get().schedule(&_kernel, Window::DimX);
}
+} // namespace arm_compute \ No newline at end of file
diff --git a/tests/validation/NEON/Range.cpp b/tests/validation/NEON/Range.cpp
new file mode 100644
index 0000000000..06351c8f66
--- /dev/null
+++ b/tests/validation/NEON/Range.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NERange.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/RangeFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
// Tolerances used when comparing the NEON output against the reference.
constexpr RelativeTolerance<float> tolerance(0.01f);
constexpr AbsoluteTolerance<float> abs_tolerance(0.02f);

// Start/step value datasets; the "unsigned" variants contain only
// non-negative values so they are safe for unsigned output types (e.g. U8).
const auto start_dataset = framework::dataset::make("Start", { float(3), float(-17), float(16) });
const auto unsigned_start_dataset = framework::dataset::make("Start", { float(3), float(16) });
const auto float_step_dataset = framework::dataset::make("Step", { float(1), float(-0.2f), float(0.2), float(12.2), float(-12.2), float(-1.2), float(-3), float(3) });
const auto step_dataset = framework::dataset::make("Step", { float(1), float(12), float(-12), float(-1), float(-3), float(3) });
const auto unsigned_step_dataset = framework::dataset::make("Step", { float(1), float(12), float(3) });
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(Range)
+
+// *INDENT-OFF*
+// clang-format off
+
// Exercise NERange::validate against invalid configurations; each entry in
// the "Expected" dataset documents why that configuration must be rejected.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(27U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(27U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(27U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(10U), 1, DataType::QASYMM8),
                                                        TensorInfo(TensorShape(10U), 1, DataType::U8),
                                                      }),
               framework::dataset::make("Start",{ 0.0f,
                                                  15.0f,
                                                  1500.0f,
                                                  100.0f,
                                                  -15.0f,
                                                  0.2f,
                                                  2.0f,
                                                  10.0f,
                                                  10.0f
                                                })),
               framework::dataset::make("End",{ 100.0f,
                                                15.0f,
                                                2500.0f,
                                                -1000.0f,
                                                15.0f,
                                                10.0f,
                                                10.0f,
                                                100.0f,
                                                100.0f
                                              })),
               framework::dataset::make("Step",{ 100.0f,
                                                 15.0f,
                                                 10.0f,
                                                 100.0f,
                                                 -15.0f,
                                                 1.0f,
                                                 0.0f,
                                                 10.0f,
                                                 10.0f
                                               })),
               framework::dataset::make("Expected", { false, // 1-D tensor expected
                                                      false, // start == end
                                                      false, // output vector size insufficient
                                                      false, // sign of step incorrect
                                                      false, // sign of step incorrect
                                                      false, // data type incompatible
                                                      false, // step = 0
                                                      false, // invalid QASYMM8 datatype
                                                      true,
                                                    })),
               output_info, start, end, step, expected)
{
    // validate() returns an error Status (false when converted to bool) for
    // every rejected configuration above, and an empty Status for the last one.
    ARM_COMPUTE_EXPECT(bool(NERange::validate(&output_info, start, end, step)) == expected, framework::LogLevel::ERRORS);
}
+// clang-format on
+// *INDENT-ON*
+
// Convenience alias binding the generic RangeFixture to the NEON backend.
template <typename T>
using NERangeFixture = RangeFixture<Tensor, Accessor, NERange, T>;

TEST_SUITE(U8)
// U8 uses the non-negative start/step datasets to stay within the unsigned range.
FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
                           framework::dataset::make("DataType", DataType::U8),
                           unsigned_start_dataset),
                           unsigned_step_dataset),
                           framework::dataset::make("QuantizationInfo", { QuantizationInfo() })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance, 0.f, abs_tolerance);
}
TEST_SUITE_END() // U8
+
TEST_SUITE(S16)
// S16 supports negative values, so the full (signed) start/step datasets apply.
FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
                           framework::dataset::make("DataType", DataType::S16),
                           start_dataset),
                           step_dataset),
                           framework::dataset::make("QuantizationInfo", { QuantizationInfo() })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance, 0.f, abs_tolerance);
}
TEST_SUITE_END() // S16
+
TEST_SUITE(Float)
// Floating-point suites use fractional steps as well; FP16 is only compiled
// when the target supports half-precision vector arithmetic.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
                           framework::dataset::make("DataType", DataType::F16),
                           start_dataset),
                           float_step_dataset),
                           framework::dataset::make("QuantizationInfo", { QuantizationInfo() })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance, 0.f, abs_tolerance);
}
TEST_SUITE_END() // FP16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NERangeFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
                           framework::dataset::make("DataType", DataType::F32),
                           start_dataset),
                           float_step_dataset),
                           framework::dataset::make("QuantizationInfo", { QuantizationInfo() })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance, 0.f, abs_tolerance);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // Range
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/utils/Utils.h b/utils/Utils.h
index 031a3726a1..ad71776803 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -194,6 +194,20 @@ inline std::string get_typestring(DataType data_type)
}
}
+/** Returns the number of elements required to go from start to end with the wanted step
+ *
+ * @param[in] start start value
+ * @param[in] end end value
+ * @param[in] step step value between each number in the wanted sequence
+ *
+ * @return number of elements to go from start value to end value using the wanted step
+ */
+inline size_t num_of_elements_in_range(const float start, const float end, const float step)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(step == 0, "Range Step cannot be 0");
+ return size_t(std::ceil((end - start) / step));
+}
+
/** Returns true if the value can be represented by the given data type
*
* @param[in] val value to be checked