author     George Wort <george.wort@arm.com>            2018-12-12 17:39:58 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2019-01-21 15:27:13 +0000
commit     d88590f4022bfb6eda3bad4fa599727bab723667 (patch)
tree       99061e540e21c4a285bd5637cbffc359aa3d27b6
parent     7ad6257ff09c94aade46ce5d02b644821235121a (diff)
COMPMID-1753: NEON: Implement Less, Greater, GreaterEqual, Equal, Not Equal
Change-Id: I6fa95badcdecb826ac5bd9113f118603d5ac2e82
Reviewed-on: https://review.mlplatform.org/393
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
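A minimal usage sketch of the function-level API this patch adds (the tensor shapes, the fill step and the surrounding function are illustrative assumptions, not part of the patch; the comparison output is U8, 255 for true and 0 for false):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
    #include "arm_compute/runtime/Tensor.h"

    void run_greater_example()
    {
        using namespace arm_compute;

        Tensor a, b, out;
        a.allocator()->init(TensorInfo(TensorShape(32U, 13U), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(32U, 13U), 1, DataType::F32));
        out.allocator()->init(TensorInfo(TensorShape(32U, 13U), 1, DataType::U8));

        NEGreater greater;
        greater.configure(&a, &b, &out); // per element: out = (a > b) ? 255 : 0

        a.allocator()->allocate();
        b.allocator()->allocate();
        out.allocator()->allocate();
        // ... fill a and b ...
        greater.run();
    }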
-rw-r--r--  arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h    77
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/cge.h                   64
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h             2
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/not.h                   64
-rw-r--r--  arm_compute/runtime/NEON/functions/NEElementwiseOperations.h     82
-rw-r--r--  src/core/NEON/kernels/NEElementwiseOperationKernel.cpp          618
-rw-r--r--  src/runtime/NEON/functions/NEElementwiseOperators.cpp            32
-rw-r--r--  tests/validation/NEON/Comparisons.cpp                           176
8 files changed, 972 insertions(+), 143 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
index 93ad437322..f02f71b50e 100644
--- a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -14,9 +14,9 @@
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INNEUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY NEAIM, DAMAGES OR OTHER
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
@@ -60,37 +60,35 @@ public:
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
+ /** Common signature for all the specialised arithmetic functions
+ *
+ * @param[in] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor. Data types supported: Dependent on subclass.
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using ElementwiseFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
+
protected:
/** Validate the arguments passed to the kernel
*
- * @param[in] input1 First tensor input. Data types supported: S16/F16/S32/F32.
+ * @param[in] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
* @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
- * @param[in] output Output tensor. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor. Data types supported: Dependent on subclass.
*/
- virtual Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) = 0;
+ static Status validate_arguments_common(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output);
/** Common configure function for element-wise operators with no additional options (e.g. Min, Max, SquaredDiff)
*
*/
- template <ArithmeticOperation op>
void configure_common(const ITensor *input1, const ITensor *input2, ITensor *output);
- ArithmeticOperation _op; // Code of the operation to execute
+ /** Function to use for the particular tensor types passed to configure() */
+ std::function<void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window)> _function;
-private:
- /** Common signature for all the specialised add functions
- *
- * @param[in] input1 An input tensor. Data types supported: S16/F16/S32/F32
- * @param[in] input2 An input tensor. Data types supported: S16/F16/S32/F32
- * @param[out] output The output tensor. Data types supported: S16/F16/S32/F32
- * @param[in] window Region on which to execute the kernel.
- */
- using ElementwiseFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
- /** Add function to use for the particular tensor types passed to configure() */
- ElementwiseFunction *_func;
- const ITensor *_input1;
- const ITensor *_input2;
- ITensor *_output;
+ const ITensor *_input1;
+ const ITensor *_input2;
+ ITensor *_output;
};
class NEArithmeticOperationKernel : public NEElementwiseOperationKernel
@@ -125,7 +123,40 @@ public:
protected:
// Inherited methods overridden:
- Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) override;
+ static Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output);
+};
+
+class NEComparisonOperationKernel : public NEElementwiseOperationKernel
+{
+public:
+ NEComparisonOperationKernel()
+ : NEElementwiseOperationKernel()
+ {
+ }
+
+ /** Configure the kernel's comparison operation, inputs and output
+ *
+ * @param[in] op Comparison operation to be executed.
+ * @param[in] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor. Data types supported: U8.
+ */
+ void configure(ComparisonOperation op, const ITensor *input1, const ITensor *input2, ITensor *output);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
+ *
+ * @param[in] op Comparison operation to be executed.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: U8.
+ *
+ * @return a Status
+ */
+ static Status validate(ComparisonOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+protected:
+ // Inherited methods overridden:
+ static Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output);
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEELEMENTWISEOPERATIONKERNEL_H__ */
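The kernel can also be checked up front from tensor metadata alone; a sketch of that validate call (shapes and data types here are illustrative assumptions):

    using namespace arm_compute;
    const TensorInfo in_info(TensorShape(32U, 13U), 1, DataType::F32);
    const TensorInfo out_info(TensorShape(32U, 13U), 1, DataType::U8);
    const Status s = NEComparisonOperationKernel::validate(ComparisonOperation::Equal, &in_info, &in_info, &out_info);
    // inspect s before configuring the kernel with real tensors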
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/cge.h b/arm_compute/core/NEON/wrapper/intrinsics/cge.h
new file mode 100644
index 0000000000..168a6f597d
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/cge.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_CGE_H__
+#define __ARM_COMPUTE_WRAPPER_CGE_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCGE_IMPL(stype, vtype, rtype, prefix, postfix) \
+ inline rtype vcge(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VCGE_IMPL(uint8_t, uint8x8_t, uint8x8_t, vcge, u8)
+VCGE_IMPL(int8_t, int8x8_t, uint8x8_t, vcge, s8)
+VCGE_IMPL(uint16_t, uint16x4_t, uint16x4_t, vcge, u16)
+VCGE_IMPL(int16_t, int16x4_t, uint16x4_t, vcge, s16)
+VCGE_IMPL(uint32_t, uint32x2_t, uint32x2_t, vcge, u32)
+VCGE_IMPL(int32_t, int32x2_t, uint32x2_t, vcge, s32)
+VCGE_IMPL(float32x2_t, float32x2_t, uint32x2_t, vcge, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGE_IMPL(float16x4_t, float16x4_t, uint16x4_t, vcge, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCGE_IMPL(uint8_t, uint8x16_t, uint8x16_t, vcgeq, u8)
+VCGE_IMPL(int8_t, int8x16_t, uint8x16_t, vcgeq, s8)
+VCGE_IMPL(uint16_t, uint16x8_t, uint16x8_t, vcgeq, u16)
+VCGE_IMPL(int16_t, int16x8_t, uint16x8_t, vcgeq, s16)
+VCGE_IMPL(uint32_t, uint32x4_t, uint32x4_t, vcgeq, u32)
+VCGE_IMPL(int32_t, int32x4_t, uint32x4_t, vcgeq, s32)
+VCGE_IMPL(float32x4_t, float32x4_t, uint32x4_t, vcgeq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGE_IMPL(float16x8_t, float16x8_t, uint16x8_t, vcgeq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCGE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_CGE_H__ */
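Each VCGE_IMPL line stamps out one vcge overload; e.g. the float32x4_t entry above is equivalent to the following (a sketch of the macro expansion):

    inline uint32x4_t vcge(const float32x4_t &a, const float32x4_t &b)
    {
        return vcgeq_f32(a, b); // lane-wise a >= b: all-ones (0xFFFFFFFF) where true, zero otherwise
    }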
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index a0193ee3d2..c8f4a6e041 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -29,6 +29,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/bsl.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/ceq.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/cge.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/cgt.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/clt.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/combine.h"
@@ -48,6 +49,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/movn.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/mul.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/neg.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/not.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/orr.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/padd.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pmax.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/not.h b/arm_compute/core/NEON/wrapper/intrinsics/not.h
new file mode 100644
index 0000000000..33ac6b5634
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/not.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_NOT_H__
+#define __ARM_COMPUTE_WRAPPER_NOT_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VNOT_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vnot(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VNOT_IMPL(uint8_t, uint8x8_t, vmvn, u8)
+VNOT_IMPL(int8_t, int8x8_t, vmvn, s8)
+VNOT_IMPL(uint16_t, uint16x4_t, vmvn, u16)
+VNOT_IMPL(int16_t, int16x4_t, vmvn, s16)
+VNOT_IMPL(uint32_t, uint32x2_t, vmvn, u32)
+VNOT_IMPL(int32_t, int32x2_t, vmvn, s32)
+VNOT_IMPL(float32x2_t, float32x2_t, vinv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNOT_IMPL(float16x4_t, float16x4_t, vinv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VNOT_IMPL(uint8_t, uint8x16_t, vmvnq, u8)
+VNOT_IMPL(int8_t, int8x16_t, vmvnq, s8)
+VNOT_IMPL(uint16_t, uint16x8_t, vmvnq, u16)
+VNOT_IMPL(int16_t, int16x8_t, vmvnq, s16)
+VNOT_IMPL(uint32_t, uint32x4_t, vmvnq, u32)
+VNOT_IMPL(int32_t, int32x4_t, vmvnq, s32)
+VNOT_IMPL(float32x4_t, float32x4_t, vinvq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNOT_IMPL(float16x8_t, float16x8_t, vinvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VNOT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_NOT_H__ */
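The integer overloads map onto vmvn/vmvnq; e.g. the uint32x4_t entry above expands as sketched below, and the kernel later composes NotEqual as wrapper::vnot(wrapper::vceq(a, b)):

    inline uint32x4_t vnot(const uint32x4_t &a)
    {
        return vmvnq_u32(a); // bitwise NOT of every lane
    }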
diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
index 5cbf1237e4..cd9ed24bee 100644
--- a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
+++ b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -14,10 +14,10 @@
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INNEUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY NEAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARI SING FROM,
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
@@ -33,7 +33,7 @@ class ITensor;
/** Basic function to run @ref NEArithmeticOperationKernel for max
*
- * @note The tensor data type for the inputs must be S16/F16/S32/F32.
+ * @note The tensor data type for the inputs must be QASYMM8/S16/F16/S32/F32.
* @note The function performs a max operation between two tensors.
*/
class NEElementwiseMax : public INESimpleFunction
@@ -59,7 +59,7 @@ public:
/** Basic function to run @ref NEArithmeticOperationKernel for min
*
- * @note The tensor data type for the inputs must be S16/F16/S32/F32.
+ * @note The tensor data type for the inputs must be QASYMM8/S16/F16/S32/F32.
* @note The function performs a min operation between two tensors.
*/
class NEElementwiseMin : public INESimpleFunction
@@ -85,7 +85,7 @@ public:
/** Basic function to run @ref NEArithmeticOperationKernel for squared difference
*
- * @note The tensor data type for the inputs must be S16/F16/S32/F32.
+ * @note The tensor data type for the inputs must be QASYMM8/S16/F16/S32/F32.
* @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
*/
class NEElementwiseSquaredDiff : public INESimpleFunction
@@ -108,5 +108,73 @@ public:
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
};
+
+/** Basic function to run @ref NEComparisonOperationKernel
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/S16/F16/S32/F32.
+ * @note The function performs a comparison operation between two tensors.
+ */
+class NEElementwiseComparison : public INESimpleFunction
+{
+public:
+ /** Initialise the kernel's inputs, output and comparison operation.
+ *
+ * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in, out] input2 Second tensor input. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor. Data types supported: U8.
+ * @param[in] op Comparison Operation to be performed.
+ */
+ void configure(ITensor *input1, ITensor *input2, ITensor *output, ComparisonOperation op);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
+ *
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: U8.
+ * @param[in] op Comparison Operation to be performed.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation op);
+};
+
+/** Basic function to run @ref NEComparisonOperationKernel
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/S16/F16/S32/F32.
+ * @note The function performs a comparison operation between two tensors.
+ */
+template <ComparisonOperation op>
+class NEElementwiseComparisonStatic : public INESimpleFunction
+{
+public:
+ /** Initialise the kernel's inputs and output.
+ *
+ * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in, out] input2 Second tensor input. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor. Data types supported: U8.
+ */
+ void configure(ITensor *input1, ITensor *input2, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
+ *
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: U8.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
+
+/** Basic function to run equal comparison. */
+using NEEqual = NEElementwiseComparisonStatic<ComparisonOperation::Equal>;
+/** Basic function to run not equal comparison. */
+using NENotEqual = NEElementwiseComparisonStatic<ComparisonOperation::NotEqual>;
+/** Basic function to run greater comparison. */
+using NEGreater = NEElementwiseComparisonStatic<ComparisonOperation::Greater>;
+/** Basic function to run greater-equal comparison. */
+using NEGreaterEqual = NEElementwiseComparisonStatic<ComparisonOperation::GreaterEqual>;
+/** Basic function to run less comparison. */
+using NELess = NEElementwiseComparisonStatic<ComparisonOperation::Less>;
+/** Basic function to run less-equal comparison. */
+using NELessEqual = NEElementwiseComparisonStatic<ComparisonOperation::LessEqual>;
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEELEMENTWISEOPERATIONS_H__ */
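When the comparison is only known at configure time, the non-templated class covers the same cases; a sketch reusing the illustrative a/b (F32) and out (U8) tensors from the earlier example:

    NEElementwiseComparison cmp;
    cmp.configure(&a, &b, &out, ComparisonOperation::LessEqual);
    cmp.run(); // per element: out = (a <= b) ? 255 : 0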
diff --git a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
index ee9c10014f..88fd730554 100644
--- a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,6 +61,20 @@ float32x4x4_t load_quantized(const uint8_t *input1_ptr, const int32x4_t &offset,
return out;
}
+void store_quantized(uint8_t *output_ptr, const uint32x4x4_t &out)
+{
+ const uint8x8_t pa = vqmovn_u16(vcombine_u16(vqmovn_u32(out.val[0]), vqmovn_u32(out.val[1])));
+ const uint8x8_t pb = vqmovn_u16(vcombine_u16(vqmovn_u32(out.val[2]), vqmovn_u32(out.val[3])));
+ vst1q_u8(output_ptr, vcombine_u8(pa, pb));
+}
+
+void store_quantized(uint8_t *output_ptr, const int32x4x4_t &out)
+{
+ const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[0]), vqmovn_s32(out.val[1])));
+ const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[2]), vqmovn_s32(out.val[3])));
+ vst1q_u8(output_ptr, vcombine_u8(pa, pb));
+}
+
void store_quantized(uint8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale)
{
int32x4x4_t out =
@@ -70,10 +84,7 @@ void store_quantized(uint8_t *output_ptr, const float32x4x4_t &rf, const float32
vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)),
vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)),
};
-
- const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[0]), vqmovn_s32(out.val[1])));
- const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[2]), vqmovn_s32(out.val[3])));
- vst1q_u8(output_ptr, vcombine_u8(pa, pb));
+ store_quantized(output_ptr, out);
}
float32x4x4_t dup_quantized(qasymm8_t broadcast_value, int offset, float scale)
@@ -95,7 +106,7 @@ float32x4x4_t dup_quantized(qasymm8_t broadcast_value, int offset, float scale)
}
template <ArithmeticOperation op, typename ScalarType>
-inline ScalarType elementwise_op_scalar(const ScalarType &a, const ScalarType &b)
+inline ScalarType elementwise_arithm_op_scalar(const ScalarType &a, const ScalarType &b)
{
auto res = ScalarType(0);
@@ -118,8 +129,14 @@ inline ScalarType elementwise_op_scalar(const ScalarType &a, const ScalarType &b
return res;
}
+template <ArithmeticOperation op>
+inline uint8_t elementwise_arithm_op_quantized_scalar(const float &a, const float &b, QuantizationInfo qinfo)
+{
+ return qinfo.quantize(elementwise_arithm_op_scalar<op>(a, b), RoundingPolicy::TO_NEAREST_UP);
+}
+
template <ArithmeticOperation op, typename VectorType>
-inline VectorType elementwise_op(const VectorType &a, const VectorType &b)
+inline VectorType elementwise_arithm_op(const VectorType &a, const VectorType &b)
{
VectorType res = { 0, 0, 0, 0 };
@@ -145,28 +162,297 @@ inline VectorType elementwise_op(const VectorType &a, const VectorType &b)
return res;
}
-template <ArithmeticOperation op, typename VectorType, typename ScalarType>
-inline VectorType elementwise_op_broadcast(const VectorType &a, const ScalarType &broadcast_value)
+template <ArithmeticOperation op>
+inline float32x4x4_t elementwise_arithm_op(const float32x4x4_t &a, const float32x4x4_t &b)
+{
+ float32x4x4_t out =
+ {
+ elementwise_arithm_op<op>(a.val[0], b.val[0]),
+ elementwise_arithm_op<op>(a.val[1], b.val[1]),
+ elementwise_arithm_op<op>(a.val[2], b.val[2]),
+ elementwise_arithm_op<op>(a.val[3], b.val[3]),
+ };
+ return out;
+}
+
+template <ArithmeticOperation op, typename ScalarType, typename VectorType>
+inline VectorType elementwise_arithm_op_broadcast(const VectorType &a, const ScalarType &broadcast_value, const bool reorder)
{
VectorType broadcast_vector = wrapper::vdup_n(broadcast_value, wrapper::traits::vector_128_tag());
- return elementwise_op<op>(a, broadcast_vector);
+ return elementwise_arithm_op<op>(reorder ? broadcast_vector : a, reorder ? a : broadcast_vector);
}
-template <ArithmeticOperation op>
-float32x4x4_t elementwise_op(const float32x4x4_t &a, const float32x4x4_t &b)
+template <ComparisonOperation op, typename InputScalarType>
+inline uint8_t elementwise_comp_op_scalar(const InputScalarType &a, const InputScalarType &b)
{
- float32x4x4_t out =
+ bool res = false;
+
+ switch(op)
+ {
+ case ComparisonOperation::Equal:
+ res = (a == b);
+ break;
+ case ComparisonOperation::NotEqual:
+ res = (a != b);
+ break;
+ case ComparisonOperation::Greater:
+ res = (a > b);
+ break;
+ case ComparisonOperation::GreaterEqual:
+ res = (a >= b);
+ break;
+ case ComparisonOperation::Less:
+ res = (a < b);
+ break;
+ case ComparisonOperation::LessEqual:
+ res = (a <= b);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+ return res ? ~static_cast<uint8_t>(0) : static_cast<uint8_t>(0);
+}
+
+template <ComparisonOperation op>
+inline uint8_t elementwise_comp_op_quantized_scalar(const float &a, const float &b, QuantizationInfo qinfo)
+{
+ ARM_COMPUTE_UNUSED(qinfo);
+ return elementwise_comp_op_scalar<op>(a, b);
+}
+
+template <ComparisonOperation op, typename InputVectorType, typename OutputVectorType>
+inline OutputVectorType elementwise_comp_op(const InputVectorType &a, const InputVectorType &b)
+{
+ OutputVectorType res = { 0, 0, 0, 0 };
+
+ switch(op)
+ {
+ case ComparisonOperation::Equal:
+ res = wrapper::vceq(a, b);
+ break;
+ case ComparisonOperation::NotEqual:
+ res = wrapper::vnot(wrapper::vceq(a, b));
+ break;
+ case ComparisonOperation::Greater:
+ res = wrapper::vcgt(a, b);
+ break;
+ case ComparisonOperation::GreaterEqual:
+ res = wrapper::vcge(a, b);
+ break;
+ case ComparisonOperation::Less:
+ res = wrapper::vcgt(b, a);
+ break;
+ case ComparisonOperation::LessEqual:
+ res = wrapper::vcge(b, a);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+
+ return res;
+}
+
+template <ComparisonOperation op>
+inline uint32x4x4_t elementwise_comp_op(const float32x4x4_t &a, const float32x4x4_t &b)
+{
+ uint32x4x4_t out =
{
- elementwise_op<op>(a.val[0], b.val[0]),
- elementwise_op<op>(a.val[1], b.val[1]),
- elementwise_op<op>(a.val[2], b.val[2]),
- elementwise_op<op>(a.val[3], b.val[3]),
+ elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[0], b.val[0]),
+ elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[1], b.val[1]),
+ elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[2], b.val[2]),
+ elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[3], b.val[3])
};
return out;
}
-template <ArithmeticOperation op, typename ScalarType>
-void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+template <ComparisonOperation op, typename InputScalarType, typename InputVectorType, typename OutputVectorType>
+inline OutputVectorType elementwise_comp_op_broadcast(const InputVectorType &a, const InputScalarType &broadcast_value, const bool reorder)
+{
+ InputVectorType broadcast_vector = wrapper::vdup_n(broadcast_value, wrapper::traits::vector_128_tag());
+ return elementwise_comp_op<op, InputVectorType, OutputVectorType>(reorder ? broadcast_vector : a, reorder ? a : broadcast_vector);
+}
+
+template <ArithmeticOperation op, typename ScalarType, typename VectorType>
+inline int elementwise_arithm_op_loop(int window_start_x, int window_end_x, int window_step_x,
+ const ScalarType *input1_ptr, const ScalarType *input2_ptr, ScalarType *output_ptr)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto a = wrapper::vloadq(input1_ptr + x);
+ const auto b = wrapper::vloadq(input2_ptr + x);
+ wrapper::vstore(output_ptr + x, elementwise_arithm_op<op>(a, b));
+ }
+ return x;
+}
+
+template <ArithmeticOperation op>
+inline int elementwise_arithm_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x,
+ const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr,
+ int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
+ float32x4_t voffseto, float32x4_t invvscaleo)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Get inputs and compute output
+ const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1);
+ const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2);
+ const float32x4x4_t rf = elementwise_arithm_op<op>(af, bf);
+ store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
+ }
+ return x;
+}
+
+template <ArithmeticOperation op, typename ScalarType, typename VectorType>
+inline int elementwise_arithm_op_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
+ const ScalarType *non_broadcast_input_ptr, const ScalarType &broadcast_value, ScalarType *output_ptr, const bool reorder)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto a = wrapper::vloadq((non_broadcast_input_ptr + x));
+ wrapper::vstore(output_ptr + x, elementwise_arithm_op_broadcast<op>(a, broadcast_value, reorder));
+ }
+ return x;
+}
+
+template <ArithmeticOperation op>
+inline int elementwise_arithm_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
+ const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
+ int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
+ float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
+ const float32x4x4_t rf = elementwise_arithm_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
+ store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
+ }
+ return x;
+}
+
+template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
+inline int elementwise_comp_op_16_loop(int window_start_x, int window_end_x, int window_step_x,
+ const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto a = wrapper::vloadq(input1_ptr + x);
+ const auto b = wrapper::vloadq(input2_ptr + x);
+ const auto res = elementwise_comp_op<op, InputVectorType, uint16x8_t>(a, b);
+ wrapper::vstore(output_ptr + x, wrapper::vmovn(res));
+ }
+ return x;
+}
+
+template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
+inline int elementwise_comp_op_32_loop(int window_start_x, int window_end_x, int window_step_x,
+ const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ auto a = wrapper::vloadq(input1_ptr + x);
+ auto b = wrapper::vloadq(input2_ptr + x);
+ const auto res = elementwise_comp_op<op, InputVectorType, uint32x4_t>(a, b);
+ a = wrapper::vloadq(input1_ptr + x + 4);
+ b = wrapper::vloadq(input2_ptr + x + 4);
+ const auto res2 = elementwise_comp_op<op, InputVectorType, uint32x4_t>(a, b);
+ wrapper::vstore(output_ptr + x, wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(res), wrapper::vmovn(res2))));
+ }
+ if(x <= window_end_x - 4)
+ {
+ const auto a = wrapper::vloadq(input1_ptr + x);
+ const auto b = wrapper::vloadq(input2_ptr + x);
+ const auto res = elementwise_comp_op<op, InputVectorType, uint32x4_t>(a, b);
+ for(int i = 0; i < 4; i++)
+ {
+ *(output_ptr + x + i) = wrapper::vgetlane(res, i);
+ }
+ x += 4;
+ }
+ return x;
+}
+
+template <ComparisonOperation op>
+inline int elementwise_comp_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x,
+ const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr,
+ int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
+ float32x4_t voffseto, float32x4_t invvscaleo)
+{
+ ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1);
+ const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2);
+ const uint32x4x4_t rf = elementwise_comp_op<op>(af, bf);
+ store_quantized(output_ptr + x, rf);
+ }
+ return x;
+}
+
+template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
+inline int elementwise_comp_op_broadcast_16_loop(int window_start_x, int window_end_x, int window_step_x,
+ const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto a = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint16x8_t>(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder);
+ wrapper::vstore(output_ptr + x, wrapper::vmovn(a));
+ }
+ return x;
+}
+
+template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
+inline int elementwise_comp_op_broadcast_32_loop(int window_start_x, int window_end_x, int window_step_x,
+ const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder)
+{
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto a = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint32x4_t>(wrapper::vloadq(non_broadcast_input_ptr + x), broadcast_value, reorder);
+ const auto b = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint32x4_t>(wrapper::vloadq(non_broadcast_input_ptr + x + 4), broadcast_value, reorder);
+ wrapper::vstore(output_ptr + x, wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(a), wrapper::vmovn(b))));
+ }
+ if(x <= window_end_x - 4)
+ {
+ const auto a = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint32x4_t>(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder);
+ for(int i = 0; i < 4; i++)
+ {
+ *(output_ptr + x + i) = wrapper::vgetlane(a, i);
+ }
+ x += 4;
+ }
+ return x;
+}
+
+template <ComparisonOperation op>
+inline int elementwise_comp_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
+ const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
+ int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
+ float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
+{
+ ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
+ const uint32x4x4_t rf = elementwise_comp_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
+ store_quantized(output_ptr + x, rf);
+ }
+ return x;
+}
+
+template <typename InputScalarType, typename OutputScalarType, typename InputVectorType>
+void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
+ OutputScalarType (*scalar_func)(const InputScalarType &, const InputScalarType &),
+ int (*broadcast_func)(int, int, int, const InputScalarType *, const InputScalarType &, OutputScalarType *, const bool),
+ int (*neon_func)(int, int, int, const InputScalarType *, const InputScalarType *, OutputScalarType *))
{
// Create input windows
Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
@@ -176,14 +462,13 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const
Window win = window;
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- const int window_step_x = 16 / in1->info()->element_size();
+ const int window_step_x = std::min(16 / static_cast<int32_t>(sizeof(OutputScalarType)), 8);
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
const bool is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);
if(is_broadcast_across_x)
{
- // Select the broadcast input on the X axis
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
@@ -199,20 +484,15 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const
execute_window_loop(win, [&](const Coordinates & id)
{
- auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
- const auto non_broadcast_input_ptr = reinterpret_cast<const ScalarType *>(non_broadcast_input.ptr());
- const ScalarType broadcast_value = *reinterpret_cast<const ScalarType *>(broadcast_input.ptr());
+ auto output_ptr = reinterpret_cast<OutputScalarType *>(output.ptr());
+ const auto non_broadcast_input_ptr = reinterpret_cast<const InputScalarType *>(non_broadcast_input.ptr());
+ const InputScalarType broadcast_value = *reinterpret_cast<const InputScalarType *>(broadcast_input.ptr());
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto a = wrapper::vloadq((non_broadcast_input_ptr + x));
- wrapper::vstore(output_ptr + x, elementwise_op_broadcast<op>(a, broadcast_value));
- }
+ int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_value, output_ptr, !is_broadcast_input_2);
for(; x < window_end_x; ++x)
{
const auto a = *(non_broadcast_input_ptr + x);
- *(output_ptr + x) = elementwise_op_scalar<op>(a, broadcast_value);
+ *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? broadcast_value : a, !is_broadcast_input_2 ? a : broadcast_value);
}
},
broadcast_input, non_broadcast_input, output);
@@ -229,31 +509,29 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const
execute_window_loop(win, [&](const Coordinates & id)
{
- auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
- const auto input1_ptr = reinterpret_cast<const ScalarType *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const ScalarType *>(input2.ptr());
+ auto output_ptr = reinterpret_cast<OutputScalarType *>(output.ptr());
+ const auto input1_ptr = reinterpret_cast<const InputScalarType *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const InputScalarType *>(input2.ptr());
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto a = wrapper::vloadq(input1_ptr + x);
- const auto b = wrapper::vloadq(input2_ptr + x);
- wrapper::vstore(output_ptr + x, elementwise_op<op>(a, b));
- }
+ int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr);
for(; x < window_end_x; ++x)
{
const auto a = *(input1_ptr + x);
const auto b = *(input2_ptr + x);
- *(output_ptr + x) = elementwise_op_scalar<op>(a, b);
+ *(output_ptr + x) = (*scalar_func)(a, b);
}
-
},
input1, input2, output);
}
}
-template <ArithmeticOperation op>
-void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
+ uint8_t (*scalar_func)(const float &, const float &, QuantizationInfo),
+ int (*broadcast_func)(int, int, int, const uint8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t,
+ float32x4_t, float32x4_t, const bool),
+ int (*neon_func)(int, int, int, const uint8_t *, const uint8_t *, uint8_t *,
+ int32x4_t, int32x4_t, float32x4_t, float32x4_t,
+ float32x4_t, float32x4_t))
{
// Create input windows
Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
@@ -305,18 +583,14 @@ void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *o
const uint8_t broadcast_value = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
const float32x4x4_t broadcast_vector = dup_quantized(broadcast_value, broadcast_qinfo.offset, broadcast_qinfo.scale);
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
- const float32x4x4_t rf = elementwise_op<op>(af, broadcast_vector);
- store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
- }
+ int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
+ voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
for(; x < window_end_x; ++x)
{
- const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
- const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;
- *(output_ptr + x) = out->info()->quantization_info().quantize(elementwise_op_scalar<op>(afs, bfs), RoundingPolicy::TO_NEAREST_UP);
+ const float afs = scvt_f32_qasymm8(*(non_broadcast_input_ptr + x), non_broadcast_qinfo.scale, non_broadcast_qinfo.offset);
+ const float bfs = scvt_f32_qasymm8(broadcast_value, broadcast_qinfo.scale, broadcast_qinfo.offset);
+ *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs,
+ out->info()->quantization_info());
}
},
broadcast_input, non_broadcast_input, output);
@@ -348,31 +622,131 @@ void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *o
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- // Get inputs and compute output
- const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1);
- const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2);
- const float32x4x4_t rf = elementwise_op<op>(af, bf);
- store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
- }
+ int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2,
+ vscale1, vscale2, voffseto, invvscaleo);
for(; x < window_end_x; ++x)
{
- const float afs = static_cast<int32_t>((*(input1_ptr + x)) - input1_qinfo.offset) * input1_qinfo.scale;
- const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - input2_qinfo.offset) * input2_qinfo.scale;
- *(output_ptr + x) = out->info()->quantization_info().quantize(elementwise_op_scalar<op>(afs, bfs), RoundingPolicy::TO_NEAREST_UP);
+ const float afs = scvt_f32_qasymm8(*(input1_ptr + x), input1_qinfo.scale, input1_qinfo.offset);
+ const float bfs = scvt_f32_qasymm8(*(input2_ptr + x), input2_qinfo.scale, input2_qinfo.offset);
+ *(output_ptr + x) = (*scalar_func)(afs, bfs, out->info()->quantization_info());
}
},
input1, input2, output);
}
}
-Status validate_arguments_arithmetic(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
+template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
+void elementwise_comp_op_16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ elementwise_op<InputScalarType, uint8_t, InputVectorType>(in1, in2, out, window,
+ &elementwise_comp_op_scalar<op, InputScalarType>,
+ &elementwise_comp_op_broadcast_16_loop<op, InputScalarType, InputVectorType>,
+ &elementwise_comp_op_16_loop<op, InputScalarType, InputVectorType>);
+}
+
+template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
+void elementwise_comp_op_32(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ elementwise_op<InputScalarType, uint8_t, InputVectorType>(in1, in2, out, window,
+ &elementwise_comp_op_scalar<op, InputScalarType>,
+ &elementwise_comp_op_broadcast_32_loop<op, InputScalarType, InputVectorType>,
+ &elementwise_comp_op_32_loop<op, InputScalarType, InputVectorType>);
+}
+
+template <ArithmeticOperation op, typename ScalarType, typename VectorType>
+void elementwise_arithm_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ elementwise_op<ScalarType, ScalarType, VectorType>(in1, in2, out, window,
+ &elementwise_arithm_op_scalar<op, ScalarType>,
+ &elementwise_arithm_op_broadcast_loop<op, ScalarType, VectorType>,
+ &elementwise_arithm_op_loop<op, ScalarType, VectorType>);
+}
+
+template <ArithmeticOperation op>
+void elementwise_arithm_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ elementwise_op_quantized(in1, in2, out, window, &elementwise_arithm_op_quantized_scalar<op>,
+ &elementwise_arithm_op_quantized_broadcast_loop<op>,
+ &elementwise_arithm_op_quantized_loop<op>);
+}
+
+template <ComparisonOperation op>
+void elementwise_comp_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ elementwise_op_quantized(in1, in2, out, window, &elementwise_comp_op_quantized_scalar<op>,
+ &elementwise_comp_op_quantized_broadcast_loop<op>,
+ &elementwise_comp_op_quantized_loop<op>);
+}
+
+std::function<void(const ITensor *, const ITensor *, ITensor *, const Window &)>
+configure_func(const ITensor *input1, const ITensor *input2, ITensor *output,
+ std::map<std::string, NEElementwiseOperationKernel::ElementwiseFunction *> map_function)
+{
+ std::string function_to_call("op_");
+ function_to_call += string_from_data_type(input1->info()->data_type()) + "_";
+ function_to_call += string_from_data_type(input2->info()->data_type()) + "_";
+ function_to_call += string_from_data_type(output->info()->data_type());
+
+ auto it = map_function.find(function_to_call);
+
+ if(it != map_function.end())
+ {
+ auto func = it->second;
+ return [func](const ITensor * input1, const ITensor * input2, ITensor * output, const Window & window)
+ {
+ func(input1, input2, output, window);
+ };
+ }
+ return nullptr;
+}
+
+template <ArithmeticOperation op>
+std::function<void(const ITensor *, const ITensor *, ITensor *, const Window &)>
+configure_arithm_func(const ITensor *input1, const ITensor *input2, ITensor *output)
+{
+ static std::map<std::string, NEElementwiseOperationKernel::ElementwiseFunction *> map_function =
+ {
+ { "op_F32_F32_F32", &elementwise_arithm_op<op, float, float32x4_t> },
+ { "op_S16_S16_S16", &elementwise_arithm_op<op, int16_t, int16x8_t> },
+ { "op_S32_S32_S32", &elementwise_arithm_op<op, int32_t, int32x4_t> },
+ { "op_QASYMM8_QASYMM8_QASYMM8", &elementwise_arithm_op_quantized<op> }
+ };
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ map_function["op_F16_F16_F16"] = &elementwise_arithm_op<op, float16_t, float16x8_t>;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+ return configure_func(input1, input2, output, map_function);
+}
+
+template <ComparisonOperation op>
+std::function<void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window)>
+configure_comp_func(const ITensor *input1, const ITensor *input2, ITensor *output)
+{
+ static std::map<std::string, NEElementwiseOperationKernel::ElementwiseFunction *> map_function =
+ {
+ { "op_F32_F32_U8", &elementwise_comp_op_32<op, float, float32x4_t> },
+ { "op_S16_S16_U8", &elementwise_comp_op_16<op, int16_t, int16x8_t> },
+ { "op_S32_S32_U8", &elementwise_comp_op_32<op, int32_t, int32x4_t> },
+ { "op_QASYMM8_QASYMM8_U8", &elementwise_comp_op_quantized<op> }
+ };
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ map_function["op_F16_F16_U8"] = &elementwise_comp_op_16<op, float16_t, float16x8_t>;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+ return configure_func(input1, input2, output, map_function);
+}
+} // namespace
+
+NEElementwiseOperationKernel::NEElementwiseOperationKernel()
+ : _function(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr)
+{
+}
+
+Status NEElementwiseOperationKernel::validate_arguments_common(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::S32, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::S32, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2);
const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
@@ -382,24 +756,16 @@ Status validate_arguments_arithmetic(const ITensorInfo &input1, const ITensorInf
// Validate in case of configured output
if(output.total_size() > 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
"Wrong shape for output");
}
return Status{};
}
-} // namespace
-NEElementwiseOperationKernel::NEElementwiseOperationKernel()
- : _op(), _func(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr)
-{
-}
-template <ArithmeticOperation op>
void NEElementwiseOperationKernel::configure_common(const ITensor *input1, const ITensor *input2, ITensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info()));
// Configure kernel window
const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1->info(), *input2->info());
@@ -411,76 +777,108 @@ void NEElementwiseOperationKernel::configure_common(const ITensor *input1, const
Window win = calculate_max_window(valid_region);
- static std::map<std::string, ElementwiseFunction *> map_function =
- {
- { "op_F32_F32_F32", &elementwise_op<op, float> },
- { "op_S16_S16_S16", &elementwise_op<op, int16_t> },
- { "op_S32_S32_S32", &elementwise_op<op, int32_t> },
- { "op_QASYMM8_QASYMM8_QASYMM8", &elementwise_op_quantized<op> }
- };
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- map_function["op_F16_F16_F16"] = &elementwise_op<op, float16_t>;
-#endif /* ARM_COMPUTE_AARCH64_V8_2 */
_input1 = input1;
_input2 = input2;
_output = output;
- std::string function_to_call("op_");
- function_to_call += string_from_data_type(input1->info()->data_type()) + "_";
- function_to_call += string_from_data_type(input2->info()->data_type()) + "_";
- function_to_call += string_from_data_type(output->info()->data_type());
- auto it = map_function.find(function_to_call);
-
- if(it != map_function.end())
- {
- _func = it->second;
- }
-
INEKernel::configure(win);
}
void NEElementwiseOperationKernel::run(const Window &window, const ThreadInfo &info)
{
- ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_UNUSED(info, window);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
- ARM_COMPUTE_ERROR_ON(_func == nullptr);
-
- (*_func)(_input1, _input2, _output, window);
+ ARM_COMPUTE_ERROR_ON(_function == nullptr);
+ _function(_input1, _input2, _output, window);
}
/** Arithmetic operators (min, max, squared_diff) */
void NEArithmeticOperationKernel::configure(ArithmeticOperation op, const ITensor *input1, const ITensor *input2, ITensor *output)
{
- _op = op;
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info()));
+ configure_common(input1, input2, output);
switch(op)
{
case ArithmeticOperation::MAX:
- configure_common<ArithmeticOperation::MAX>(input1, input2, output);
+ _function = configure_arithm_func<ArithmeticOperation::MAX>(input1, input2, output);
break;
case ArithmeticOperation::MIN:
- configure_common<ArithmeticOperation::MIN>(input1, input2, output);
+ _function = configure_arithm_func<ArithmeticOperation::MIN>(input1, input2, output);
break;
case ArithmeticOperation::SQUARED_DIFF:
- configure_common<ArithmeticOperation::SQUARED_DIFF>(input1, input2, output);
+ _function = configure_arithm_func<ArithmeticOperation::SQUARED_DIFF>(input1, input2, output);
break;
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
}
}
+Status NEArithmeticOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
+{
+ // Validate in case of configured output
+ if(output.total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);
+ }
+ return validate_arguments_common(input1, input2, output);
+}
+
Status NEArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
ARM_COMPUTE_UNUSED(op);
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
-
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_arithmetic(*input1, *input2, *output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output));
return Status{};
}
-Status NEArithmeticOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
+/** Comparison operators (equal, not equal, less than, greater than, less than or equal, greater than or equal) */
+
+void NEComparisonOperationKernel::configure(ComparisonOperation op, const ITensor *input1, const ITensor *input2, ITensor *output)
{
- return validate_arguments_arithmetic(input1, input2, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info()));
+ configure_common(input1, input2, output);
+ switch(op)
+ {
+ case ComparisonOperation::Equal:
+ _function = configure_comp_func<ComparisonOperation::Equal>(input1, input2, output);
+ break;
+ case ComparisonOperation::NotEqual:
+ _function = configure_comp_func<ComparisonOperation::NotEqual>(input1, input2, output);
+ break;
+ case ComparisonOperation::Greater:
+ _function = configure_comp_func<ComparisonOperation::Greater>(input1, input2, output);
+ break;
+ case ComparisonOperation::GreaterEqual:
+ _function = configure_comp_func<ComparisonOperation::GreaterEqual>(input1, input2, output);
+ break;
+ case ComparisonOperation::Less:
+ _function = configure_comp_func<ComparisonOperation::Less>(input1, input2, output);
+ break;
+ case ComparisonOperation::LessEqual:
+ _function = configure_comp_func<ComparisonOperation::LessEqual>(input1, input2, output);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+}
+
+Status NEComparisonOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
+{
+ // Validate in case of configured output
+ if(output.total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8);
+ }
+ return validate_arguments_common(input1, input2, output);
+}
+
+Status NEComparisonOperationKernel::validate(ComparisonOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ ARM_COMPUTE_UNUSED(op);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output));
+ return Status{};
}
} // namespace arm_compute
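The broadcast helpers above take a reorder flag (true when input1 is the broadcast operand) so non-commutative comparisons keep their operand order; a simplified scalar sketch of that logic (the helper name and plain types are illustrative, not part of the patch):

    template <typename T>
    uint8_t greater_broadcast_scalar(T non_broadcast_value, T broadcast_value, bool reorder)
    {
        const T lhs = reorder ? broadcast_value : non_broadcast_value; // reorder: the broadcast tensor is input1
        const T rhs = reorder ? non_broadcast_value : broadcast_value;
        return (lhs > rhs) ? 255 : 0; // the comparison kernels write all-ones (255) for true
    }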
diff --git a/src/runtime/NEON/functions/NEElementwiseOperators.cpp b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
index 4d4a6a9c50..711e99ea77 100644
--- a/src/runtime/NEON/functions/NEElementwiseOperators.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -14,9 +14,9 @@
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INNEUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY NEAIM, DAMAGES OR OTHER
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
@@ -66,4 +66,30 @@ Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITens
{
return NEArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
}
+
+template <ComparisonOperation COP>
+void NEElementwiseComparisonStatic<COP>::configure(ITensor *input1, ITensor *input2, ITensor *output)
+{
+ auto k = arm_compute::support::cpp14::make_unique<NEComparisonOperationKernel>();
+ k->configure(COP, input1, input2, output);
+ _kernel = std::move(k);
+}
+
+template <ComparisonOperation COP>
+Status NEElementwiseComparisonStatic<COP>::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+ return NEComparisonOperationKernel::validate(COP, input1, input2, output);
+}
+
+void NEElementwiseComparison::configure(ITensor *input1, ITensor *input2, ITensor *output, ComparisonOperation op)
+{
+ auto k = arm_compute::support::cpp14::make_unique<NEComparisonOperationKernel>();
+ k->configure(op, input1, input2, output);
+ _kernel = std::move(k);
+}
+
+Status NEElementwiseComparison::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation op)
+{
+ return NEComparisonOperationKernel::validate(op, input1, input2, output);
+}
} // namespace arm_compute
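
At the runtime level the new functions are thin wrappers: NEElementwiseComparison and the NEElementwiseComparisonStatic<COP> template both create an NEComparisonOperationKernel, forward configure() and validate() to it, and hand the kernel to the simple-function base class for execution; the static variant bakes the operation into the type, with any convenience aliases for it defined outside this hunk. A minimal caller might look like the sketch below; it assumes the usual arm_compute allocation pattern (allocator()->init()/allocate()) and that run() is inherited from the function's base class, so treat it as illustrative rather than as part of the patch.

// Hedged usage sketch for the new runtime function (not part of the patch).
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"

using namespace arm_compute;

int main()
{
    const TensorShape shape(32U, 13U, 2U);

    Tensor input1, input2, output;
    input1.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    input2.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    output.allocator()->init(TensorInfo(shape, 1, DataType::U8)); // comparisons produce a U8 mask

    // validate() forwards to NEComparisonOperationKernel::validate().
    const Status status = NEElementwiseComparison::validate(input1.info(), input2.info(), output.info(),
                                                            ComparisonOperation::Greater);
    if(!bool(status))
    {
        return 1;
    }

    NEElementwiseComparison greater;
    greater.configure(&input1, &input2, &output, ComparisonOperation::Greater);

    input1.allocator()->allocate();
    input2.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input1 and input2 with application data ...

    greater.run();
    return 0;
}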
diff --git a/tests/validation/NEON/Comparisons.cpp b/tests/validation/NEON/Comparisons.cpp
new file mode 100644
index 0000000000..c3b1c69523
--- /dev/null
+++ b/tests/validation/NEON/Comparisons.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ComparisonOperationsDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ComparisonFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+const auto configure_dataset = combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", { DataType::QASYMM8,
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ DataType::F16,
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ DataType::F32
+ }));
+
+const auto run_small_dataset = combine(datasets::ComparisonOperations(), datasets::SmallShapes());
+const auto run_large_dataset = combine(datasets::ComparisonOperations(), datasets::LargeShapes());
+
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(Comparison)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid output type
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching input types
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ }),
+ framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
+ TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+ })),
+ framework::dataset::make("Expected", { false, false, false, true})),
+ input1_info, input2_info, output_info, expected)
+{
+ Status s = NEElementwiseComparison::validate(&input1_info.clone()->set_is_resizable(false),
+ &input2_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(false),
+ ComparisonOperation::Equal);
+ ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, configure_dataset,
+ shape, data_type)
+{
+ // Create tensors
+ Tensor ref_src1 = create_tensor<Tensor>(shape, data_type);
+ Tensor ref_src2 = create_tensor<Tensor>(shape, data_type);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U8);
+
+ // Create and Configure function
+ NEElementwiseComparison compare;
+ compare.configure(&ref_src1, &ref_src2, &dst, ComparisonOperation::Equal);
+
+ // Validate valid region
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(dst.info()->valid_region(), valid_region);
+}
+
+template <typename T>
+using NEComparisonFixture = ComparisonValidationFixture<Tensor, Accessor, NEElementwiseComparison, T>;
+
+TEST_SUITE(Float)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEComparisonFixture<half>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(run_small_dataset, framework::dataset::make("DataType", DataType::F16)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ NEComparisonFixture<half>,
+ framework::DatasetMode::NIGHTLY,
+ combine(run_large_dataset, framework::dataset::make("DataType", DataType::F16)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEComparisonFixture<float>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(run_small_dataset, framework::dataset::make("DataType", DataType::F32)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ NEComparisonFixture<float>,
+ framework::DatasetMode::NIGHTLY,
+ combine(run_large_dataset, framework::dataset::make("DataType", DataType::F32)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+
+template <typename T>
+using NEComparisonQuantizedFixture = ComparisonValidationQuantizedFixture<Tensor, Accessor, NEElementwiseComparison, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEComparisonQuantizedFixture<uint8_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(run_small_dataset, framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+
+TEST_SUITE_END() // Comparison
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
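
For the quantized suite above, the two QuantizationInfo entries give the inputs different scale/offset pairs, so the comparison is only meaningful once each QASYMM8 value has been mapped back to real numbers. The scalar sketch below spells out, for Greater as an example, the reference semantics the fixture appears to check; the dequantisation formula and the 255/0 mask encoding are assumptions made for illustration, and only the scale/offset values are taken from the test dataset.

// Hedged scalar reference for a QASYMM8 Greater comparison (illustrative only).
#include <cstddef>
#include <cstdint>
#include <vector>

// Asymmetric dequantisation: real = (q - offset) * scale (assumed convention).
static float dequantize(uint8_t q, float scale, int offset)
{
    return (static_cast<int>(q) - offset) * scale;
}

std::vector<uint8_t> reference_greater_qasymm8(const std::vector<uint8_t> &a,
                                               const std::vector<uint8_t> &b)
{
    // Scale/offset pairs mirror the QuantizationInfo values used in the test above.
    const float scale_a  = 5.f / 255.f;
    const int   offset_a = 20;
    const float scale_b  = 2.f / 255.f;
    const int   offset_b = 10;

    std::vector<uint8_t> out(a.size());
    for(std::size_t i = 0; i < a.size(); ++i)
    {
        const float lhs = dequantize(a[i], scale_a, offset_a);
        const float rhs = dequantize(b[i], scale_b, offset_b);
        out[i]          = (lhs > rhs) ? 255 : 0; // U8 mask; 255/0 encoding assumed
    }
    return out;
}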