author    Michele Di Giorgio <michele.digiorgio@arm.com>  2017-07-11 15:00:52 +0100
committer Anthony Barbier <anthony.barbier@arm.com>       2018-09-17 14:16:42 +0100
commit    81f0d15d6840a0ae8ef571114555a26da74c4a43 (patch)
tree      a9eeda0a2b69961cd6a51d74e039bbed26a9b436
parent    f70256bd46f03090281581c152bd17b4a50febcd (diff)
COMPMID-444: Add support for QS8/QS16 NEON Arithmetic Add/Sub/Mul.
Change-Id: Ia482498688ca1884272b5062e3415e736e03d36f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80448
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
-rw-r--r--  arm_compute/core/NEON/NEFixedPoint.h                             |  8
-rw-r--r--  arm_compute/core/NEON/NEFixedPoint.inl                           |  5
-rw-r--r--  arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h       | 12
-rw-r--r--  arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h    | 12
-rw-r--r--  arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h  |  7
-rw-r--r--  src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp             | 48
-rw-r--r--  src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp          | 48
-rw-r--r--  src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp        | 64
-rw-r--r--  tests/validation/NEON/ArithmeticAddition.cpp                     | 79
-rw-r--r--  tests/validation/NEON/ArithmeticSubtraction.cpp                  | 81
-rw-r--r--  tests/validation/NEON/PixelWiseMultiplication.cpp                | 48
-rw-r--r--  tests/validation/Reference.cpp                                   | 16
-rw-r--r--  tests/validation/Reference.h                                     | 26
13 files changed, 386 insertions(+), 68 deletions(-)
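Editor's note: the QS8/QS16 types this patch enables are signed fixed-point formats in which an n-bit integer stores value * 2^fixed_point_position. A minimal illustrative sketch of the convention (the helpers below are hypothetical and ignore saturation; they are not part of this patch):

    #include <cmath>
    #include <cstdint>

    // Hypothetical helper: raw QS8 value = round(x * 2^fixed_point_position).
    int8_t float_to_qs8(float x, int fixed_point_position)
    {
        return static_cast<int8_t>(std::lround(x * (1 << fixed_point_position)));
    }

    float qs8_to_float(int8_t q, int fixed_point_position)
    {
        return static_cast<float>(q) / (1 << fixed_point_position);
    }

    // Example: with fixed_point_position = 5, 1.5f is stored as 48 (1.5 * 32).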
diff --git a/arm_compute/core/NEON/NEFixedPoint.h b/arm_compute/core/NEON/NEFixedPoint.h
index 09579f9120..50463b5efe 100644
--- a/arm_compute/core/NEON/NEFixedPoint.h
+++ b/arm_compute/core/NEON/NEFixedPoint.h
@@ -145,6 +145,14 @@ qint8x16_t vld1q_dup_qs8(const qint8_t *addr);
*/
qint16x8_t vld1q_dup_qs16(const qint16_t *addr);
+/** Load two 16 bit fixed point vectors from memory (8x2 elements)
+ *
+ * @param[in] addr Memory address of the 16 bit fixed point vectors to load
+ *
+ * @return 16 bit fixed point vectors (8x2 elements)
+ */
+qint16x8x2_t vld2q_qs16(const qint16_t *addr);
+
/** Store a single 8 bit fixed point vector to memory (8 elements)
*
* @param[in] addr Memory address where the 8 bit fixed point vector should be stored
diff --git a/arm_compute/core/NEON/NEFixedPoint.inl b/arm_compute/core/NEON/NEFixedPoint.inl
index 4e862ba387..05e481561d 100644
--- a/arm_compute/core/NEON/NEFixedPoint.inl
+++ b/arm_compute/core/NEON/NEFixedPoint.inl
@@ -181,6 +181,11 @@ inline qint16x8_t vld1q_dup_qs16(const qint16_t *addr)
return vld1q_dup_s16(addr);
}
+inline qint16x8x2_t vld2q_qs16(const qint16_t *addr)
+{
+ return vld2q_s16(addr);
+}
+
inline void vst1_qs8(qint8_t *addr, qint8x8_t b)
{
vst1_s8(addr, b);
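Editor's note: vld2q_qs16, like the vld2q_s16 intrinsic it forwards to, performs a de-interleaving load of 16 consecutive qint16_t values into two 8-lane vectors, and vst2q_s16 re-interleaves on the store, so a load/process/store round trip over contiguous data preserves element order for element-wise operations. A usage sketch (hypothetical function; buffers assumed at least 16 elements long):

    #include "arm_compute/core/NEON/NEFixedPoint.h"

    using namespace arm_compute;

    // Hypothetical: squares 16 contiguous QS16 values with saturating multiply.
    void square_16_qs16(const qint16_t *src, qint16_t *dst, int fixed_point_position)
    {
        const qint16x8x2_t v = vld2q_qs16(src); // even-indexed lanes in val[0], odd-indexed in val[1]
        const qint16x8x2_t r =
        {
            {
                vqmulq_qs16(v.val[0], v.val[0], fixed_point_position),
                vqmulq_qs16(v.val[1], v.val[1], fixed_point_position)
            }
        };
        vst2q_s16(dst, r); // re-interleaves, restoring the original element order
    }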
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
index 9bfdde1616..7ad5893b70 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
@@ -50,9 +50,9 @@ public:
/** Initialise the kernel's input, output and border mode.
*
- * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/S16/F16 (only if @p input1 is F16)/F32 (only if @p input1 is F32).
- * @param[out] output The output tensor. Data types supported: U8 (Only if both inputs are U8), S16/F16 (only if both inputs are F16), F32 (only if both inputs are F32).
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8, QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
+ * @param[out] output The output tensor. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16 (only if both inputs are F16), F32 (only if both inputs are F32).
* @param[in] policy Overflow policy.
*/
void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
@@ -63,9 +63,9 @@ public:
private:
/** Common signature for all the specialised add functions
*
- * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/S16/F16 (only if @p input1 is F16)/F32 (only if @p input1 is F32).
- * @param[out] output The output tensor. Data types supported: U8 (Only if both inputs are U8), S16/F16 (only if both inputs are F16), F32 (only if both inputs are F32).
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8, QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
+ * @param[out] output The output tensor. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16 (only if both inputs are F16), F32 (only if both inputs are F32).
* @param[in] window Region on which to execute the kernel.
*/
using AddFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
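Editor's note: at the function level the new fixed-point paths are driven exactly like the existing ones. A minimal configuration sketch, mirroring the create_tensor<Tensor>(shape, dt, 1, fixed_point_position) pattern used in the tests below (shape and fixed-point position here are arbitrary; the TensorInfo constructor taking a fixed_point_position is assumed as in this version of the library):

    #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void add_qs8_sketch()
    {
        Tensor src1, src2, dst;
        const TensorInfo info(TensorShape(16U, 16U), 1, DataType::QS8, 3); // 3 fractional bits
        src1.allocator()->init(info);
        src2.allocator()->init(info);
        dst.allocator()->init(info);

        NEArithmeticAddition add;
        add.configure(&src1, &src2, &dst, ConvertPolicy::SATURATE);

        src1.allocator()->allocate();
        src2.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src1/src2, then:
        add.run();
    }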
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
index 0eb9c23686..6f88d2757a 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
@@ -50,9 +50,9 @@ public:
/** Initialise the kernel's input, output and border mode.
*
- * @param[in] input1 An input tensor. Data types supported: U8/S16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/S16/F32 (only if @p input1 is F32).
- * @param[out] output The output tensor. Data types supported: U8 (Only if both inputs are U8), S16/F32 (only if both inputs are F32).
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8, QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F32 (only if @p input1 is F32).
+ * @param[out] output The output tensor. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F32 (only if both inputs are F32).
* @param[in] policy Overflow policy.
*/
void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
@@ -63,9 +63,9 @@ public:
private:
/** Common signature for all the specialised sub functions
*
- * @param[in] input1 An input tensor. Data types supported: U8, S16, F32.
- * @param[in] input2 An input tensor. Data types supported: U8, S16, F32 (only if @p input1 is F32).
- * @param[out] output The output tensor. Data types supported: U8 (Only if both inputs are U8), S16, F32 (only if both inputs are F32)
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8, QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F32 (only if @p input1 is F32).
+ * @param[out] output The output tensor. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F32 (only if both inputs are F32).
* @param[in] window Region on which to execute the kernel.
*/
using SubFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index 433a20e48e..bf96c9026c 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -51,10 +51,11 @@ public:
*
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
+ * For QS8/QS16, the only supported scale value is 1.
*
- * @param[in] input1 An input tensor. Data types supported: U8/QS8/S16/F16/F32.
- * @param[in] input2 An input tensor. Data types supported: U8/QS8/S16/F16/F32.
- * @param[out] output The output tensor. Data types supported: U8 (Only if both inputs are U8) /S16/F16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8, QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
+ * @param[out] output The output tensor. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16 (only if both inputs are F16), F32 (only if both inputs are F32).
* @param[in] scale Scale to apply after multiplication.
* Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy.
diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
index c0809eb9fa..7f7e45a940 100644
--- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
@@ -45,6 +46,38 @@ class Coordinates;
namespace
{
+void add_wrap_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ Iterator input1(in1, window);
+ Iterator input2(in2, window);
+ Iterator output(out, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const qint8x16_t a = vld1q_qs8(reinterpret_cast<const qint8_t *>(input1.ptr()));
+ const qint8x16_t b = vld1q_qs8(reinterpret_cast<const qint8_t *>(input2.ptr()));
+
+ vst1q_qs8(reinterpret_cast<qint8_t *>(output.ptr()), vaddq_qs8(a, b));
+ },
+ input1, input2, output);
+}
+
+void add_saturate_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ Iterator input1(in1, window);
+ Iterator input2(in2, window);
+ Iterator output(out, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const qint8x16_t a = vld1q_qs8(reinterpret_cast<const qint8_t *>(input1.ptr()));
+ const qint8x16_t b = vld1q_qs8(reinterpret_cast<const qint8_t *>(input2.ptr()));
+
+ vst1q_qs8(reinterpret_cast<qint8_t *>(output.ptr()), vqaddq_qs8(a, b));
+ },
+ input1, input2, output);
+}
+
void add_wrap_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
Iterator input1(in1, window);
@@ -352,14 +385,21 @@ void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor
}
ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MSG(output->info()->data_type() == DataType::U8 && (input1->info()->data_type() != DataType::U8 || input2->info()->data_type() != DataType::U8),
"Output can only be U8 if both inputs are U8");
+ if(is_data_type_fixed_point(input1->info()->data_type()) || is_data_type_fixed_point(input2->info()->data_type()) || is_data_type_fixed_point(output->info()->data_type()))
+ {
+ // Check that all data types are the same and all fixed-point positions are the same
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2, output);
+ }
static std::map<std::string, AddFunction *> map_function =
{
+ { "add_wrap_QS8_QS8_QS8", &add_wrap_QS8_QS8_QS8 },
+ { "add_saturate_QS8_QS8_QS8", &add_saturate_QS8_QS8_QS8 },
{ "add_wrap_U8_U8_U8", &add_wrap_U8_U8_U8 },
{ "add_saturate_U8_U8_U8", &add_saturate_U8_U8_U8 },
{ "add_wrap_S16_U8_S16", &add_wrap_S16_U8_S16 },
@@ -368,6 +408,8 @@ void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor
{ "add_saturate_U8_S16_S16", &add_saturate_U8_S16_S16 },
{ "add_wrap_U8_U8_S16", &add_wrap_U8_U8_S16 },
{ "add_saturate_U8_U8_S16", &add_saturate_U8_U8_S16 },
+ { "add_wrap_QS16_QS16_QS16", &add_wrap_S16_S16_S16 },
+ { "add_saturate_QS16_QS16_QS16", &add_saturate_S16_S16_S16 },
{ "add_wrap_S16_S16_S16", &add_wrap_S16_S16_S16 },
{ "add_saturate_S16_S16_S16", &add_saturate_S16_S16_S16 },
{ "add_wrap_F32_F32_F32", &add_F32_F32_F32 },
diff --git a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
index f51b6b9f0b..cac2a6bd05 100644
--- a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
@@ -44,6 +45,38 @@ class Coordinates;
namespace
{
+void sub_wrap_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ Iterator input1(in1, window);
+ Iterator input2(in2, window);
+ Iterator output(out, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const qint8x16_t a = vld1q_qs8(reinterpret_cast<const qint8_t *>(input1.ptr()));
+ const qint8x16_t b = vld1q_qs8(reinterpret_cast<const qint8_t *>(input2.ptr()));
+
+ vst1q_qs8(reinterpret_cast<qint8_t *>(output.ptr()), vsubq_qs8(a, b));
+ },
+ input1, input2, output);
+}
+
+void sub_saturate_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ Iterator input1(in1, window);
+ Iterator input2(in2, window);
+ Iterator output(out, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const qint8x16_t a = vld1q_qs8(reinterpret_cast<const qint8_t *>(input1.ptr()));
+ const qint8x16_t b = vld1q_qs8(reinterpret_cast<const qint8_t *>(input2.ptr()));
+
+ vst1q_qs8(reinterpret_cast<qint8_t *>(output.ptr()), vqsubq_qs8(a, b));
+ },
+ input1, input2, output);
+}
+
void sub_wrap_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
Iterator input1(in1, window);
@@ -302,14 +335,21 @@ void NEArithmeticSubtractionKernel::configure(const ITensor *input1, const ITens
}
ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::S16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::S16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MSG(output->info()->data_type() == DataType::U8 && (input1->info()->data_type() != DataType::U8 || input2->info()->data_type() != DataType::U8),
"Output can only be U8 if both inputs are U8");
+ if(is_data_type_fixed_point(input1->info()->data_type()) || is_data_type_fixed_point(input2->info()->data_type()) || is_data_type_fixed_point(output->info()->data_type()))
+ {
+ // Check that all data types are the same and all fixed-point positions are the same
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2, output);
+ }
static std::map<std::string, SubFunction *> map_function =
{
+ { "sub_wrap_QS8_QS8_QS8", &sub_wrap_QS8_QS8_QS8 },
+ { "sub_saturate_QS8_QS8_QS8", &sub_saturate_QS8_QS8_QS8 },
{ "sub_wrap_U8_U8_U8", &sub_wrap_U8_U8_U8 },
{ "sub_wrap_U8_U8_S16", &sub_wrap_U8_U8_S16 },
{ "sub_saturate_U8_U8_U8", &sub_saturate_U8_U8_U8 },
@@ -318,6 +358,8 @@ void NEArithmeticSubtractionKernel::configure(const ITensor *input1, const ITens
{ "sub_wrap_S16_U8_S16", &sub_wrap_S16_U8_S16 },
{ "sub_saturate_U8_S16_S16", &sub_saturate_U8_S16_S16 },
{ "sub_saturate_S16_U8_S16", &sub_saturate_S16_U8_S16 },
+ { "sub_wrap_QS16_QS16_QS16", &sub_wrap_S16_S16_S16 },
+ { "sub_saturate_QS16_QS16_QS16", &sub_saturate_S16_S16_S16 },
{ "sub_wrap_S16_S16_S16", &sub_wrap_S16_S16_S16 },
{ "sub_saturate_S16_S16_S16", &sub_saturate_S16_S16_S16 },
{ "sub_wrap_F32_F32_F32", &sub_F32_F32_F32 },
diff --git a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
index 83d6d8218e..150db39695 100644
--- a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
+++ b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
@@ -148,6 +148,46 @@ void mul_QS8_QS8_QS8_n(const void *__restrict input1_ptr, const void *__restrict
}
template <bool is_scale255, bool is_sat>
+void mul_QS16_QS16_QS16_n(const void *__restrict input1_ptr, const void *__restrict input2_ptr, void *__restrict output_ptr, int n, int fixed_point_position)
+{
+ // n is the exponent of the scaling factor, that is scale = 1/2^n. Currently, we only support scaling factor equal to 1 => n = 0.
+ ARM_COMPUTE_ERROR_ON_MSG(n != 0, "Scaling factor different than 1 not supported for 16-bit fixed-point pixel-wise multiplication");
+ ARM_COMPUTE_UNUSED(n);
+
+ const qint16x8x2_t ta1 = vld2q_qs16(static_cast<const qint16_t *__restrict>(input1_ptr));
+ const qint16x8x2_t ta2 = vld2q_qs16(static_cast<const qint16_t *__restrict>(input2_ptr));
+
+ if(is_sat)
+ {
+ const qint16x8x2_t res =
+ {
+ {
+ // First 8 elements
+ vqmulq_qs16(ta1.val[0], ta2.val[0], fixed_point_position),
+ // Second 8 elements
+ vqmulq_qs16(ta1.val[1], ta2.val[1], fixed_point_position)
+ }
+ };
+
+ vst2q_s16(static_cast<qint16_t *__restrict>(output_ptr), res);
+ }
+ else
+ {
+ const qint16x8x2_t res =
+ {
+ {
+ // First 8 elements
+ vmulq_qs16(ta1.val[0], ta2.val[0], fixed_point_position),
+ // Second 8 elements
+ vmulq_qs16(ta1.val[1], ta2.val[1], fixed_point_position)
+ }
+ };
+
+ vst2q_s16(static_cast<qint16_t *__restrict>(output_ptr), res);
+ }
+}
+
+template <bool is_scale255, bool is_sat>
inline int16x8_t mul_S16_S16_S16_n_loop(const int16x8_t &input1, const int16x8_t &input2, int n)
{
int32x4_t tmp1_high = vmovl_s16(vget_high_s16(input1));
@@ -389,16 +429,15 @@ void NEPixelWiseMultiplicationKernel::configure(const ITensor *input1, const ITe
}
ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MSG(output->info()->data_type() == DataType::U8 && (input1->info()->data_type() != DataType::U8 || input2->info()->data_type() != DataType::U8),
"Output can only be U8 if both inputs are U8");
- if(input1->info()->data_type() == DataType::QS8)
+ if(is_data_type_fixed_point(input1->info()->data_type()) || is_data_type_fixed_point(input2->info()->data_type()) || is_data_type_fixed_point(output->info()->data_type()))
{
- // All data types must be QS8
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input1, input2, output);
+ // Check that all data types are the same and all fixed-point positions are the same
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2, output);
}
_input1 = input1;
@@ -513,6 +552,17 @@ void NEPixelWiseMultiplicationKernel::configure(const ITensor *input1, const ITe
_func_q_int = is_sat ? &mul_QS8_QS8_QS8_n<false, true> : &mul_QS8_QS8_QS8_n<false, false>;
}
}
+ else if(DataType::QS16 == dt_input1 && DataType::QS16 == dt_input2 && DataType::QS16 == dt_output)
+ {
+ if(is_scale_255)
+ {
+ _func_q_int = is_sat ? &mul_QS16_QS16_QS16_n<true, true> : &mul_QS16_QS16_QS16_n<true, false>;
+ }
+ else
+ {
+ _func_q_int = is_sat ? &mul_QS16_QS16_QS16_n<false, true> : &mul_QS16_QS16_QS16_n<false, false>;
+ }
+ }
else if(DataType::F16 == dt_input1 && DataType::F16 == dt_input2 && DataType::F16 == dt_output)
{
_func_float = &mul_F16_F16_F16_n<false, false>;
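Editor's note: unlike add/sub, fixed-point multiplication cannot reuse the integer kernels, because the product of two Q values carries an extra 2^fixed_point_position factor that must be shifted back out. A scalar sketch of the per-lane computation (assuming round-half-up, as the existing QS8 path uses; illustrative only, valid for fixed_point_position >= 1):

    #include <cstdint>

    int16_t qs16_mul(int16_t a, int16_t b, int fixed_point_position)
    {
        const int32_t round = 1 << (fixed_point_position - 1);              // round half up
        const int32_t prod  = static_cast<int32_t>(a) * static_cast<int32_t>(b);
        return static_cast<int16_t>((prod + round) >> fixed_point_position); // rescale by 2^-fpp
    }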
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index 65c3295e18..6feb15ad34 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -51,20 +51,21 @@ namespace
{
/** Compute Neon arithmetic addition function.
*
- * @param[in] shape Shape of the input and output tensors.
- * @param[in] dt_in0 Data type of first input tensor.
- * @param[in] dt_in1 Data type of second input tensor.
- * @param[in] dt_out Data type of the output tensor.
- * @param[in] policy Overflow policy of the operation.
+ * @param[in] shape Shape of the input and output tensors.
+ * @param[in] dt_in0 Data type of first input tensor.
+ * @param[in] dt_in1 Data type of second input tensor.
+ * @param[in] dt_out Data type of the output tensor.
+ * @param[in] policy Overflow policy of the operation.
+ * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16 (default = 0).
*
* @return Computed output tensor.
*/
-Tensor compute_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy)
+Tensor compute_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy, int fixed_point_position = 0)
{
// Create tensors
- Tensor src1 = create_tensor<Tensor>(shape, dt_in0);
- Tensor src2 = create_tensor<Tensor>(shape, dt_in1);
- Tensor dst = create_tensor<Tensor>(shape, dt_out);
+ Tensor src1 = create_tensor<Tensor>(shape, dt_in0, 1, fixed_point_position);
+ Tensor src2 = create_tensor<Tensor>(shape, dt_in1, 1, fixed_point_position);
+ Tensor dst = create_tensor<Tensor>(shape, dt_out, 1, fixed_point_position);
// Create and configure function
NEArithmeticAddition add;
@@ -184,6 +185,66 @@ BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::make({ Da
}
BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+
#ifdef ARM_COMPUTE_ENABLE_FP16
BOOST_AUTO_TEST_SUITE(F16)
BOOST_DATA_TEST_CASE(RunSmall, SmallShapes(), shape)
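Editor's note on the fixed-point ranges in the Quantized suites above: boost's xrange is half-open, so xrange(1, 7) exercises QS8 with 1-6 fractional bits and xrange(1, 15) exercises QS16 with 1-14, always leaving at least one integer bit beside the sign bit. The representable range follows directly (n = total bits, f = fractional bits):

    // min = -2^(n - 1 - f),  max = (2^(n - 1) - 1) / 2^f
    // QS8,  f = 6  (top of xrange(1, 7)):  [-2.0, ~1.984]
    // QS16, f = 14 (top of xrange(1, 15)): [-2.0, ~1.99994]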
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index 37240faaf6..54cd9f04ba 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -51,20 +51,21 @@ namespace
{
/** Compute Neon arithmetic subtraction function.
*
- * @param[in] shape Shape of the input and output tensors.
- * @param[in] dt_in0 Data type of first input tensor.
- * @param[in] dt_in1 Data type of second input tensor.
- * @param[in] dt_out Data type of the output tensor.
- * @param[in] policy Overflow policy of the operation.
+ * @param[in] shape Shape of the input and output tensors.
+ * @param[in] dt_in0 Data type of first input tensor.
+ * @param[in] dt_in1 Data type of second input tensor.
+ * @param[in] dt_out Data type of the output tensor.
+ * @param[in] policy Overflow policy of the operation.
+ * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16 (default = 0).
*
* @return Computed output tensor.
*/
-Tensor compute_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy)
+Tensor compute_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy, int fixed_point_position = 0)
{
// Create tensors
- Tensor src1 = create_tensor<Tensor>(shape, dt_in0);
- Tensor src2 = create_tensor<Tensor>(shape, dt_in1);
- Tensor dst = create_tensor<Tensor>(shape, dt_out);
+ Tensor src1 = create_tensor<Tensor>(shape, dt_in0, 1, fixed_point_position);
+ Tensor src2 = create_tensor<Tensor>(shape, dt_in1, 1, fixed_point_position);
+ Tensor dst = create_tensor<Tensor>(shape, dt_out, 1, fixed_point_position);
// Create and configure function
NEArithmeticSubtraction sub;
@@ -184,7 +185,67 @@ BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::make({ Da
}
BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE(F32)
+BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+ shape, policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(Float)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * boost::unit_test::data::make({ ConvertPolicy::SATURATE, ConvertPolicy::WRAP }),
shape, policy)
diff --git a/tests/validation/NEON/PixelWiseMultiplication.cpp b/tests/validation/NEON/PixelWiseMultiplication.cpp
index 26ea38a52b..4bc4d6ca06 100644
--- a/tests/validation/NEON/PixelWiseMultiplication.cpp
+++ b/tests/validation/NEON/PixelWiseMultiplication.cpp
@@ -334,7 +334,7 @@ BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * (1.f / 255.f) * ConvertPolicies()
BOOST_AUTO_TEST_SUITE_END()
#endif /* ARM_COMPUTE_ENABLE_FP16 */
-BOOST_AUTO_TEST_SUITE(Float)
+BOOST_AUTO_TEST_SUITE(F32)
BOOST_AUTO_TEST_SUITE(Scale255)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * (1.f / 255.f) * ConvertPolicies()
@@ -428,6 +428,7 @@ BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * DataType::QS8 *ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange<int>(1, 7),
shape, dt, convert_policy, rounding_policy, fixed_point_position)
@@ -441,6 +442,51 @@ BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * DataType::QS8 *ConvertPolicies()
// Validate output
validate(NEAccessor(dst), ref_dst);
}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * DataType::QS8 * ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange<int>(1, 7),
+                     shape, dt, convert_policy, rounding_policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_fixed_point_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, fixed_point_position, convert_policy, rounding_policy);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * DataType::QS16 * ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange<int>(1, 15),
+ shape, dt, convert_policy, rounding_policy, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_fixed_point_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, fixed_point_position, convert_policy, rounding_policy);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * DataType::QS16 * ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange<int>(1, 15),
+                     shape, dt, convert_policy, rounding_policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_fixed_point_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, fixed_point_position, convert_policy, rounding_policy);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 1b941870ba..65705b17de 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -168,12 +168,12 @@ RawTensor Reference::compute_reference_accumulate_weighted(const TensorShape &sh
return ref_dst;
}
-RawTensor Reference::compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy)
+RawTensor Reference::compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position)
{
// Create reference
- RawTensor ref_src1 = library->get(shape, dt_in0);
- RawTensor ref_src2 = library->get(shape, dt_in1);
- RawTensor ref_dst = library->get(shape, dt_out);
+ RawTensor ref_src1 = library->get(shape, dt_in0, 1, fixed_point_position);
+ RawTensor ref_src2 = library->get(shape, dt_in1, 1, fixed_point_position);
+ RawTensor ref_dst = library->get(shape, dt_out, 1, fixed_point_position);
// Fill reference
library->fill_tensor_uniform(ref_src1, 0);
@@ -185,12 +185,12 @@ RawTensor Reference::compute_reference_arithmetic_addition(const TensorShape &sh
return ref_dst;
}
-RawTensor Reference::compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy)
+RawTensor Reference::compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position)
{
// Create reference
- RawTensor ref_src1 = library->get(shape, dt_in0);
- RawTensor ref_src2 = library->get(shape, dt_in1);
- RawTensor ref_dst = library->get(shape, dt_out);
+ RawTensor ref_src1 = library->get(shape, dt_in0, 1, fixed_point_position);
+ RawTensor ref_src2 = library->get(shape, dt_in1, 1, fixed_point_position);
+ RawTensor ref_dst = library->get(shape, dt_out, 1, fixed_point_position);
// Fill reference
library->fill_tensor_uniform(ref_src1, 0);
diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h
index 902d04d151..3ad9814439 100644
--- a/tests/validation/Reference.h
+++ b/tests/validation/Reference.h
@@ -107,26 +107,28 @@ public:
static RawTensor compute_reference_accumulate_weighted(const TensorShape &shape, float alpha);
/** Compute reference arithmetic addition.
*
- * @param[in] shape Shape of the input and output tensors.
- * @param[in] dt_in0 Data type of first input tensor.
- * @param[in] dt_in1 Data type of second input tensor.
- * @param[in] dt_out Data type of the output tensor.
- * @param[in] convert_policy Overflow policy of the operation.
+ * @param[in] shape Shape of the input and output tensors.
+ * @param[in] dt_in0 Data type of first input tensor.
+ * @param[in] dt_in1 Data type of second input tensor.
+ * @param[in] dt_out Data type of the output tensor.
+ * @param[in] convert_policy Overflow policy of the operation.
+ * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers (default = 0).
*
* @return Computed raw tensor.
*/
- static RawTensor compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy);
+ static RawTensor compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position = 0);
/** Compute reference arithmetic subtraction.
*
- * @param[in] shape Shape of the input and output tensors.
- * @param[in] dt_in0 Data type of first input tensor.
- * @param[in] dt_in1 Data type of second input tensor.
- * @param[in] dt_out Data type of the output tensor.
- * @param[in] convert_policy Overflow policy of the operation.
+ * @param[in] shape Shape of the input and output tensors.
+ * @param[in] dt_in0 Data type of first input tensor.
+ * @param[in] dt_in1 Data type of second input tensor.
+ * @param[in] dt_out Data type of the output tensor.
+ * @param[in] convert_policy Overflow policy of the operation.
+ * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers (default = 0).
*
* @return Computed raw tensor.
*/
- static RawTensor compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy);
+ static RawTensor compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position = 0);
/** Compute reference bitwise and.
*
* @param[in] shape Shape of the input and output tensors.
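Editor's note: a hypothetical call site for the extended reference helpers declared above (values are arbitrary; the defaulted fixed_point_position keeps existing callers source-compatible):

    // QS16 addition reference at fixed_point_position = 10, saturating.
    RawTensor ref = Reference::compute_reference_arithmetic_addition(
        TensorShape(32U, 32U), DataType::QS16, DataType::QS16, DataType::QS16,
        ConvertPolicy::SATURATE, 10);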