-rw-r--r--  arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h  |  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h   |  4
-rw-r--r--  src/core/CL/cl_kernels/pixelwise_mul_int.cl                    | 51
-rw-r--r--  src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp        | 58
-rw-r--r--  tests/validation/CL/PixelWiseMultiplication.cpp                | 47
-rw-r--r--  tests/validation/reference/PixelWiseMultiplication.cpp         | 29
6 files changed, 143 insertions(+), 50 deletions(-)
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index 804182b187..5828e1cb1a 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -48,7 +48,7 @@ public:
CLPixelWiseMultiplicationKernel &operator=(CLPixelWiseMultiplicationKernel &&) = default;
/** Initialise the kernel's input, output and border mode.
*
- * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/QSYMM16/F16/F32.
* @param[in] input2 An input tensor. Data types supported: same as @p input1.
* @param[out] output The output tensor, Data types supported: same as @p input1. Note: U8 requires both inputs to be U8.
* @param[in] scale Scale to apply after multiplication.
@@ -60,7 +60,7 @@ public:
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel
*
- * @param[in] input1 An input tensor info. Data types supported: U8/S16/F16/F32.
+ * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/S16/QSYMM16/F16/F32.
* @param[in] input2 An input tensor info. Data types supported: same as @p input1.
* @param[in] output The output tensor info, Data types supported: same as @p input1. Note: U8 requires both inputs to be U8.
* @param[in] scale Scale to apply after multiplication.
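For context, the static validate() entry point documented above can be used to confirm that a QSYMM16 configuration is accepted before any tensor is allocated. A minimal sketch (not part of this patch; shapes and quantization parameters are illustrative):

```cpp
#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

// Returns true if the kernel accepts a QSYMM16 * QSYMM16 -> QSYMM16 configuration.
bool qsymm16_mul_is_supported()
{
    const TensorShape shape(16U, 16U);
    const TensorInfo  in1(shape, 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f));
    const TensorInfo  in2(shape, 1, DataType::QSYMM16, QuantizationInfo(2.f / 32768.f));
    const TensorInfo  out(shape, 1, DataType::QSYMM16, QuantizationInfo(5.f / 32768.f));

    // Scale 1.f, saturate on overflow, round to nearest even (as in the CL tests below).
    const Status status = CLPixelWiseMultiplicationKernel::validate(
        &in1, &in2, &out, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
    return status.error_code() == ErrorCode::OK;
}
```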
diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
index 0fa40a77f2..7833a6830d 100644
--- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
+++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
@@ -38,7 +38,7 @@ class CLPixelWiseMultiplication : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 An input tensor. Data types supported: U8/S16/F16/F32.
+ * @param[in, out] input1 An input tensor. Data types supported: U8/QASYMM8/S16/QSYMM16/F16/F32.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
* @param[in, out] input2 An input tensor. Data types supported: same as @p input1.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
@@ -52,7 +52,7 @@ public:
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplication
*
- * @param[in] input1 An input tensor info. Data types supported: U8/S16/F16/F32.
+ * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/S16/QSYMM16/F16/F32.
* @param[in] input2 An input tensor info. Data types supported: same as @p input1.
* @param[in] output The output tensor info, Data types supported: same as @p input1. Note: U8 requires both inputs to be U8.
* @param[in] scale Scale to apply after multiplication.
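A minimal end-to-end usage sketch of the runtime function with QSYMM16 inputs (not part of this patch; shapes, quantization scales and the scale argument are illustrative):

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"

using namespace arm_compute;

void run_qsymm16_mul()
{
    CLScheduler::get().default_init();

    const TensorShape shape(32U, 32U);
    CLTensor in1, in2, out;
    in1.allocator()->init(TensorInfo(shape, 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f)));
    in2.allocator()->init(TensorInfo(shape, 1, DataType::QSYMM16, QuantizationInfo(2.f / 32768.f)));
    out.allocator()->init(TensorInfo(shape, 1, DataType::QSYMM16, QuantizationInfo(5.f / 32768.f)));

    CLPixelWiseMultiplication mul;
    mul.configure(&in1, &in2, &out, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);

    in1.allocator()->allocate();
    in2.allocator()->allocate();
    out.allocator()->allocate();

    // ... fill in1/in2 with quantized data (e.g. via map()/unmap()) ...

    mul.run();
    CLScheduler::get().sync();
}
```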
diff --git a/src/core/CL/cl_kernels/pixelwise_mul_int.cl b/src/core/CL/cl_kernels/pixelwise_mul_int.cl
index 5b3acb7ae6..989316d661 100644
--- a/src/core/CL/cl_kernels/pixelwise_mul_int.cl
+++ b/src/core/CL/cl_kernels/pixelwise_mul_int.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,6 +32,9 @@
#define MUL_OP(x, y, scale, type, size) CONVERT_OP_INT((x) * (y) >> scale, type, size)
+#define CONVERT_RTE(x, type) (convert_##type##_rte((x)))
+#define CONVERT_DOWN(x, type) CONVERT_RTE(x, type)
+
#if defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_RES) && defined(DATA_TYPE_OUT)
/** Performs a pixelwise multiplication with integer scale of integer inputs.
*
@@ -88,18 +91,25 @@ __kernel void pixelwise_mul_int(
}
#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_RES) && defined(DATA_TYPE_OUT) */
-#if defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT)
+#if defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE)
+
+#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
+#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
+#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+
/** Performs a pixelwise multiplication with float scale of quantized inputs.
*
- * @note The quantization offset of the first operand must be passed at compile time using -DOFFSET_IN1, e.g. -DOFFSET_IN1=10
- * @note The quantization offset of the second operand must be passed at compile time using -DOFFSET_IN2, e.g. -DOFFSET_IN2=10
- * @note The quantization offset of the output must be passed at compile time using -DOFFSET_OUT, e.g. -DOFFSET_OUT=10
+ * @note The quantization offset of the first operand must be passed at compile time only if asymmetric using -DOFFSET_IN1, e.g. -DOFFSET_IN1=10
+ * @note The quantization offset of the second operand must be passed at compile time only if asymmetric using -DOFFSET_IN2, e.g. -DOFFSET_IN2=10
+ * @note The quantization offset of the output must be passed at compile time only if asymmetric using -DOFFSET_OUT, e.g. -DOFFSET_OUT=10
* @note The quantization scale of the first operand must be passed at compile time using -DSCALE_IN1, e.g. -DSCALE_IN1=10
* @note The quantization scale of the second operand must be passed at compile time using -DSCALE_IN2, e.g. -DSCALE_IN2=10
* @note The quantization scale of the output must be passed at compile time using -DSCALE_OUT, e.g. -DSCALE_OUT=10
* @note To perform saturating operation -DSATURATE has to be passed to the compiler otherwise wrapping policy will be used.
+ * @attention The data type must be passed at compile time using -DDATA_TYPE_OUT, e.g. -DDATA_TYPE_OUT=uchar
+ * @attention The vector size must be passed at compile time using -DVEC_SIZE, e.g. -DVEC_SIZE=16
*
- * @param[in] in1_ptr Pointer to the source image. Supported data types: U8, S16, F16, F32
+ * @param[in] in1_ptr Pointer to the source image. Supported data types: QASYMM8/QSYMM16
* @param[in] in1_stride_x Stride of the source image in X dimension (in bytes)
* @param[in] in1_step_x in1_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] in1_stride_y Stride of the source image in Y dimension (in bytes)
@@ -137,19 +147,28 @@ __kernel void pixelwise_mul_quantized(
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
// Load data
- int16 in_a = CONVERT(vload16(0, (__global uchar *)in1.ptr), int16);
- int16 in_b = CONVERT(vload16(0, (__global uchar *)in2.ptr), int16);
+ VEC_INT in_a = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_OUT *)in1.ptr), VEC_INT);
+ VEC_INT in_b = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_OUT *)in2.ptr), VEC_INT);
// Dequantize
- in_a -= (int16)(int)OFFSET_IN1;
- in_b -= (int16)(int)OFFSET_IN2;
- const float16 in1f32 = convert_float16(in_a) * (float16)(float)SCALE_IN1;
- const float16 in2f32 = convert_float16(in_b) * (float16)(float)SCALE_IN2;
+#if defined(OFFSET_IN1)
+ in_a -= (VEC_INT)((int)OFFSET_IN1);
+#endif // defined(OFFSET_IN1)
+#if defined(OFFSET_IN2)
+ in_b -= (VEC_INT)((int)OFFSET_IN2);
+#endif // defined(OFFSET_IN2)
+ const VEC_FLOAT in1f32 = CONVERT(in_a, VEC_FLOAT) * (VEC_FLOAT)((float)SCALE_IN1);
+ const VEC_FLOAT in2f32 = CONVERT(in_b, VEC_FLOAT) * (VEC_FLOAT)((float)SCALE_IN2);
- const float16 qresf32 = (in1f32 * in2f32 * scale) / ((float16)(float)SCALE_OUT) + ((float16)((float16)OFFSET_OUT));
- const uchar16 res = convert_uchar16_sat(convert_int16_rte(qresf32));
+#if defined(OFFSET_OUT)
+ const VEC_FLOAT qresf32 = (in1f32 * in2f32 * scale) / ((VEC_FLOAT)(float)SCALE_OUT) + ((VEC_FLOAT)((float)OFFSET_OUT));
+#else // defined(OFFSET_OUT)
+ const VEC_FLOAT qresf32 = (in1f32 * in2f32 * scale) / ((VEC_FLOAT)(float)SCALE_OUT);
+#endif // defined(OFFSET_OUT)
+ const VEC_TYPE res = CONVERT_SAT(CONVERT_DOWN(qresf32, VEC_INT), VEC_TYPE);
// Store result
- vstore16(res, 0, (__global uchar *)out.ptr);
+ VSTORE(VEC_SIZE)
+ (res, 0, (__global DATA_TYPE_OUT *)out.ptr);
}
-#endif /* defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) */
\ No newline at end of file
+#endif /* defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE) */
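The rewritten kernel dequantizes each input with its scale (subtracting an offset only when the corresponding OFFSET_IN* define is present, i.e. for QASYMM8), multiplies in float, folds in the user scale, requantizes with the output scale (adding OFFSET_OUT only if defined), then rounds to nearest even and saturates to the output type. A scalar C++ sketch of that arithmetic for the symmetric QSYMM16 case (illustrative only, not the kernel code):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Per-element equivalent of pixelwise_mul_quantized for QSYMM16 inputs/output.
// Offsets are zero for symmetric types, so only the scales appear here.
int16_t mul_qsymm16(int16_t a, int16_t b,
                    float scale_in1, float scale_in2, float scale_out,
                    float scale /* user scale argument */)
{
    // Dequantize (symmetric: no offset to subtract)
    const float af = static_cast<float>(a) * scale_in1;
    const float bf = static_cast<float>(b) * scale_in2;

    // Multiply, apply the user scale, requantize into the output's scale
    const float resf = (af * bf * scale) / scale_out;

    // Round to nearest even (default FP environment), then saturate to int16_t
    const float rounded = std::nearbyint(resf);
    const float clamped = std::min(32767.f, std::max(-32768.f, rounded));
    return static_cast<int16_t>(clamped);
}
```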
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
index 050bbb810b..5b00fd15ea 100644
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
+++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
@@ -51,9 +51,9 @@ Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
ARM_COMPUTE_UNUSED(rounding_policy);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input2);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");
const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
@@ -64,9 +64,13 @@ Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
if(output->total_size() > 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
"Output can only be U8 if both inputs are U8");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8 && (input1->data_type() != DataType::QASYMM8 || input2->data_type() != DataType::QASYMM8),
+ "Output can only be QASYMM8 if both inputs are QASYMM8");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QSYMM16 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
+ "Output can only be QSYMM16 if both inputs are QSYMM16");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
}
@@ -91,6 +95,14 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITe
{
set_format_if_unknown(*output, Format::F32);
}
+ else if(input1->data_type() == DataType::QASYMM8)
+ {
+ set_data_type_if_unknown(*output, DataType::QASYMM8);
+ }
+ else if(input1->data_type() == DataType::QSYMM16)
+ {
+ set_data_type_if_unknown(*output, DataType::QSYMM16);
+ }
}
Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
@@ -146,14 +158,12 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
scale_int = std::abs(exponent - 1);
}
- std::string data_type;
std::string compute_type;
// Check if it has float inputs and output
if(is_data_type_float(input1->info()->data_type()) || is_data_type_float(input2->info()->data_type()))
{
scale_int = -1;
compute_type = (input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32) ? "float" : "half";
- data_type = "DATA_TYPE_FLOAT";
}
else
{
@@ -165,41 +175,39 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
{
compute_type = "ushort";
}
- data_type = "DATA_TYPE_INT";
}
- const bool is_quantized = is_data_type_quantized_asymmetric(input1->info()->data_type());
-
- // Construct kernel name
- std::string kernel_name = "pixelwise_mul";
- if(!is_data_type_quantized(output->info()->data_type()))
- {
- kernel_name += (scale_int >= 0) ? "_int" : "_float";
- }
+ const bool is_quantized = is_data_type_quantized(input1->info()->data_type());
// Set kernel build options
+ std::string kernel_name = "pixelwise_mul";
CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
if(is_quantized)
{
const UniformQuantizationInfo iq1_info = input1->info()->quantization_info().uniform();
const UniformQuantizationInfo iq2_info = input2->info()->quantization_info().uniform();
const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
- build_opts.add_option("-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
- build_opts.add_option("-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
- build_opts.add_option("-DOFFSET_OUT=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + support::cpp11::to_string(iq1_info.scale));
- build_opts.add_option("-DSCALE_IN2=" + support::cpp11::to_string(iq2_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + support::cpp11::to_string(oq_info.scale));
+ build_opts.add_option_if(is_data_type_quantized_asymmetric(input1->info()->data_type()),
+ "-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
+ build_opts.add_option_if(is_data_type_quantized_asymmetric(input2->info()->data_type()),
+ "-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
+ build_opts.add_option_if(is_data_type_quantized_asymmetric(output->info()->data_type()),
+ "-DOFFSET_OUT=" + support::cpp11::to_string(oq_info.offset));
+ build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
+ build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
+ build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
kernel_name += "_quantized";
}
else
{
+ kernel_name += (scale_int >= 0) ? "_int" : "_float";
build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(output->info()->data_type()), "-DWRAP", "-DSATURATE");
build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
- build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->info()->data_type()));
- build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->info()->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
build_opts.add_option("-DDATA_TYPE_RES=" + compute_type);
}
@@ -207,7 +215,7 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
// Set scale argument
- unsigned int idx = 3 * num_arguments_per_3D_tensor(); //Skip the inputs and output parameters
+ unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the inputs and output parameters
if(scale_int >= 0 && !is_quantized)
{
@@ -415,4 +423,4 @@ BorderSize CLComplexPixelWiseMultiplicationKernel::border_size() const
const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration_complex - 1U, replicateSize);
return BorderSize{ 0, border, 0, 0 };
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
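With these changes configure() emits the quantization defines conditionally: SCALE_* are always passed (now with full float precision via float_to_string_with_full_precision), while OFFSET_* are added only for asymmetric types. A sketch of the option set one would expect for the QSYMM16 example above (assumed values; the exact string formatting, the CL type name "short" and the vector size of 16 are illustrative):

```cpp
#include <string>
#include <vector>

// Expected -D defines for a QSYMM16 multiplication with input scales
// 1/32768 and 2/32768 and output scale 5/32768. Note the absence of any
// OFFSET_* define, since QSYMM16 is symmetric.
std::vector<std::string> example_qsymm16_build_options()
{
    return {
        "-DDATA_TYPE_IN1=short",
        "-DDATA_TYPE_IN2=short",
        "-DDATA_TYPE_OUT=short",
        "-DVEC_SIZE=16",
        "-DSCALE_IN1=0.000030517578125",  // 1.f / 32768.f
        "-DSCALE_IN2=0.00006103515625",   // 2.f / 32768.f
        "-DSCALE_OUT=0.000152587890625"   // 5.f / 32768.f
    };
}
```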
diff --git a/tests/validation/CL/PixelWiseMultiplication.cpp b/tests/validation/CL/PixelWiseMultiplication.cpp
index 03ce4c9639..22ff9f2fb9 100644
--- a/tests/validation/CL/PixelWiseMultiplication.cpp
+++ b/tests/validation/CL/PixelWiseMultiplication.cpp
@@ -41,7 +41,8 @@ namespace
namespace
{
const float scale_255 = 1.f / 255.f;
-constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit quantized asymmetric data types */
+constexpr AbsoluteTolerance<float> tolerance_qsymm16(1); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit quantized symmetric data types */
} //namespace
// *INDENT-OFF*
// clang-format off
@@ -131,14 +132,52 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPixelWiseMultiplicationQuantizedFixture<uint8
framework::dataset::make("Scale", { 1.f, 2.f })),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
framework::dataset::make("RoundingPolicy", RoundingPolicy::TO_NEAREST_EVEN)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) })))
+ framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+ framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("OUtQInfo", { QuantizationInfo(1.f / 255.f, 5) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPixelWiseMultiplicationQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("Scale", { 1.f, 2.f })),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
+ framework::dataset::make("RoundingPolicy", RoundingPolicy::TO_NEAREST_EVEN)),
+ framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+ framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("OUtQInfo", { QuantizationInfo(1.f / 255.f, 5) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QSYMM16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPixelWiseMultiplicationQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QSYMM16)),
+ framework::dataset::make("Scale", { 1.f, 2.f })),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
+ framework::dataset::make("RoundingPolicy", RoundingPolicy::TO_NEAREST_EVEN)),
+ framework::dataset::make("Src0QInfo", { QuantizationInfo(1.f / 32768.f, 0) })),
+ framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 32768.f, 0) })),
+ framework::dataset::make("OutQInfo", { QuantizationInfo(5.f / 32768.f, 0) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPixelWiseMultiplicationQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::QSYMM16)),
+ framework::dataset::make("Scale", { 1.f, 2.f })),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
+ framework::dataset::make("RoundingPolicy", RoundingPolicy::TO_NEAREST_EVEN)),
+ framework::dataset::make("Src0QInfo", { QuantizationInfo(1.f / 32768.f, 0) })),
+ framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 32768.f, 0) })),
+ framework::dataset::make("OutQInfo", { QuantizationInfo(5.f / 32768.f, 0) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // PixelWiseMultiplication
diff --git a/tests/validation/reference/PixelWiseMultiplication.cpp b/tests/validation/reference/PixelWiseMultiplication.cpp
index 7dc32d08a7..41a919249e 100644
--- a/tests/validation/reference/PixelWiseMultiplication.cpp
+++ b/tests/validation/reference/PixelWiseMultiplication.cpp
@@ -176,10 +176,37 @@ SimpleTensor<uint8_t> pixel_wise_multiplication(const SimpleTensor<uint8_t> &src
}
return dst;
}
+
+template <>
+SimpleTensor<int16_t> pixel_wise_multiplication(const SimpleTensor<int16_t> &src1, const SimpleTensor<int16_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
+ const QuantizationInfo &qout)
+{
+ SimpleTensor<int16_t> dst(TensorShape::broadcast_shape(src1.shape(), src2.shape()), src2.data_type(), 1, qout);
+
+ if(src1.data_type() == DataType::QSYMM16 && src2.data_type() == DataType::QSYMM16)
+ {
+ SimpleTensor<float> src1_tmp = convert_from_symmetric<int16_t>(src1);
+ SimpleTensor<float> src2_tmp = convert_from_symmetric<int16_t>(src2);
+ SimpleTensor<float> dst_tmp = pixel_wise_multiplication<float>(src1_tmp, src2_tmp, scale, convert_policy, rounding_policy, qout);
+ dst = convert_to_symmetric<int16_t>(dst_tmp, qout);
+ }
+ else
+ {
+ if(scale < 0)
+ {
+ ARM_COMPUTE_ERROR("Scale of pixel-wise multiplication must be non-negative");
+ }
+
+ Coordinates id_src1{};
+ Coordinates id_src2{};
+ Coordinates id_dst{};
+ BroadcastUnroll<Coordinates::num_max_dimensions>::unroll(src1, src2, dst, scale, convert_policy, rounding_policy, id_src1, id_src2, id_dst);
+ }
+ return dst;
+}
// *INDENT-OFF*
// clang-format off
template SimpleTensor<int16_t> pixel_wise_multiplication(const SimpleTensor<uint8_t> &src1, const SimpleTensor<int16_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
-template SimpleTensor<int16_t> pixel_wise_multiplication(const SimpleTensor<int16_t> &src1, const SimpleTensor<int16_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
template SimpleTensor<float> pixel_wise_multiplication(const SimpleTensor<float> &src1, const SimpleTensor<float> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
template SimpleTensor<half_float::half> pixel_wise_multiplication(const SimpleTensor<half_float::half> &src1, const SimpleTensor<half_float::half> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
// clang-format on
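The new int16_t reference specialization dequantizes both QSYMM16 inputs to float, reuses the float reference multiplication, then requantizes with the output QuantizationInfo. A simplified sketch of the per-element conversions involved (stand-ins for the test framework's convert_from_symmetric/convert_to_symmetric helpers, with the rounding mode simplified):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize a symmetric 16-bit value: just multiply by the scale, no offset.
float dequantize_qsymm16(int16_t q, float scale)
{
    return static_cast<float>(q) * scale;
}

// Quantize back to symmetric 16-bit: divide by the scale, round, clamp to range.
// (Rounding here is round-half-away-from-zero; the library helper may differ.)
int16_t quantize_qsymm16(float v, float scale)
{
    const int q = static_cast<int>(std::lround(v / scale));
    return static_cast<int16_t>(std::min(32767, std::max(-32768, q)));
}
```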