author    Georgios Pinitas <georgios.pinitas@arm.com>    2018-12-05 18:17:24 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>    2018-12-07 12:20:28 +0000
commit    a84faffd290139be54e4a52ab11da0369262e889 (patch)
tree      709b051c3190675709b9db822a2f343ba7b564b9
parent    149de5b98c81e41ecb41797c3f1b11b661d2987e (diff)
COMPMID-1826: Add support for QASYMM8 in NEArithmeticAdditionKernel
Change-Id: Ia7fb128e1f3944d0d831e1d125a6db3e1d257106
Reviewed-on: https://review.mlplatform.org/355
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-by: Anthony Barbier <Anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h | 33
-rw-r--r--  arm_compute/runtime/NEON/functions/NEArithmeticAddition.h  | 14
-rw-r--r--  examples/graph_vgg_vdsr.cpp                                |  3
-rw-r--r--  src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp       | 68
-rw-r--r--  tests/validation/NEON/ArithmeticAddition.cpp               | 65
5 files changed, 146 insertions, 37 deletions
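
For context, a QASYMM8 value q represents the real value r = scale * (q - offset), so addition has to go through the real domain: dequantize both inputs, add, then requantize with the output tensor's parameters. A scalar model of the per-element computation the new kernel vectorizes (a sketch of those semantics, not code from the patch; note the truncating float-to-int conversion, matching vcvtq_s32_f32):

    #include <algorithm>
    #include <cstdint>

    uint8_t qasymm8_add(uint8_t a, float scale1, int offset1,
                        uint8_t b, float scale2, int offset2,
                        float scale_out, int offset_out)
    {
        const float fa = scale1 * (static_cast<int>(a) - offset1); // dequantize input 1
        const float fb = scale2 * (static_cast<int>(b) - offset2); // dequantize input 2
        // Requantize the real-domain sum; the cast truncates like vcvtq_s32_f32.
        const int q = static_cast<int>(offset_out + (fa + fb) / scale_out);
        return static_cast<uint8_t>(std::min(std::max(q, 0), 255)); // saturate to u8
    }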
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
index 8cf21eae9d..73beca6ded 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
@@ -56,25 +56,26 @@ public:
*
* Valid configurations (Input1,Input2) -> Output :
*
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (S16,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,S16) -> S16
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
+ * - (U8,U8) -> U8
+ * - (U8,U8) -> S16
+ * - (S16,U8) -> S16
+ * - (U8,S16) -> S16
+ * - (S16,S16) -> S16
+ * - (F16,F16) -> F16
+ * - (F32,F32) -> F32
+ * - (QASYMM8,QASYMM8) -> QASYMM8
*
- * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[out] output The output tensor. Data types supported: U8/S16/F16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
* @param[in] policy Overflow policy.
*/
void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAdditionKernel
*
- * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[in] output The output tensor. Data types supported: U8/S16/F16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
* @param[in] policy Overflow policy.
*
* @return a status
@@ -88,9 +89,9 @@ public:
private:
/** Common signature for all the specialised add functions
*
- * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/S16/F16/F32
- * @param[out] output The output tensor. Data types supported: U8/S16/F16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
* @param[in] window Region on which to execute the kernel.
*/
using AddFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
index c29646397c..e35f2fa0cd 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
@@ -37,22 +37,22 @@ class NEArithmeticAddition : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
- * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*/
void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAddition
*
- * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
- * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
};
-}
+} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEARITHMETICADDITION_H__ */
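
With this patch the runtime function accepts QASYMM8 tensors directly. A minimal usage sketch, assuming the standard Tensor/TensorInfo setup; the shapes and quantization parameters below are illustrative, not taken from the patch:

    #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void qasymm8_add_example()
    {
        Tensor a, b, dst;
        // Each tensor carries its own (scale, offset) quantization parameters.
        a.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8, QuantizationInfo(5.f / 255.f, 20)));
        b.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8, QuantizationInfo(2.f / 255.f, 10)));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 5)));

        NEArithmeticAddition add;
        add.configure(&a, &b, &dst, ConvertPolicy::SATURATE);

        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill a and b with quantized data ...

        add.run(); // dequantize -> add -> requantize, element-wise
    }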
diff --git a/examples/graph_vgg_vdsr.cpp b/examples/graph_vgg_vdsr.cpp
index 63fa604584..ca7d10f4a0 100644
--- a/examples/graph_vgg_vdsr.cpp
+++ b/examples/graph_vgg_vdsr.cpp
@@ -75,9 +75,6 @@ public:
std::cout << "Image width: " << image_width << std::endl;
std::cout << "Image height: " << image_height << std::endl;
- ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type) && common_params.target == Target::NEON,
- "QASYMM8 not supported for this graph");
-
// Get trainable parameters data path
const std::string data_path = common_params.data_path;
const std::string model_path = "/cnn_data/vdsr_model/";
diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
index 169554f87a..954a2c1754 100644
--- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
@@ -171,6 +171,61 @@ void add_F32_F32_F32(const ITensor *in1, const ITensor *in2, ITensor *out, const
input1, input2, output);
}
+void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
+ Iterator output(out, window);
+
+ const float32x4_t vscale1 = vdupq_n_f32(in1->info()->quantization_info().scale);
+ const float32x4_t vscale2 = vdupq_n_f32(in2->info()->quantization_info().scale);
+ const float32x4_t invvscaleo = vdupq_n_f32(1.f / out->info()->quantization_info().scale);
+ const int32x4_t voffset1 = vdupq_n_s32(in1->info()->quantization_info().offset);
+ const int32x4_t voffset2 = vdupq_n_s32(in2->info()->quantization_info().offset);
+ const float32x4_t voffseto = vdupq_n_f32(out->info()->quantization_info().offset);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const uint8x16_t a = vld1q_u8(input1.ptr());
+ const uint8x16_t b = vld1q_u8(input2.ptr());
+
+ const float32x4x4_t af =
+ {
+ {
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1),
+ }
+ };
+
+ const float32x4x4_t bf =
+ {
+ {
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2),
+ }
+ };
+
+ const int32x4x4_t rf =
+ {
+ {
+ vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
+ vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
+ vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
+ vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
+ }
+ };
+
+ const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
+ const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
+ vst1q_u8(output.ptr(), vcombine_u8(pa, pb));
+ },
+ input1, input2, output);
+}
+
void add_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
@@ -332,8 +387,8 @@ Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2,
ARM_COMPUTE_UNUSED(policy);
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
@@ -349,7 +404,8 @@ Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2,
&& !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
&& !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
&& !(input1.data_type() == DataType::F32 && input2.data_type() == DataType::F32 && output.data_type() == DataType::F32)
- && !(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16 && output.data_type() == DataType::F16),
+ && !(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16 && output.data_type() == DataType::F16)
+ && !(input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && output.data_type() == DataType::QASYMM8),
"You called addition with the wrong image formats");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
@@ -381,6 +437,10 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITe
{
set_format_if_unknown(output, Format::F32);
}
+ else if(input1.data_type() == DataType::QASYMM8 || input2.data_type() == DataType::QASYMM8)
+ {
+ set_data_type_if_unknown(output, DataType::QASYMM8);
+ }
}
Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
@@ -432,6 +492,8 @@ void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor
{ "add_saturate_F32_F32_F32", &add_F32_F32_F32 },
{ "add_wrap_F16_F16_F16", &add_F16_F16_F16 },
{ "add_saturate_F16_F16_F16", &add_F16_F16_F16 },
+ { "add_wrap_QASYMM8_QASYMM8_QASYMM8", &add_QASYMM8_QASYMM8_QASYMM8 },
+ { "add_saturate_QASYMM8_QASYMM8_QASYMM8", &add_QASYMM8_QASYMM8_QASYMM8 },
};
_input1 = input1;
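
The QASYMM8 kernel above is a straight vectorization of dequantize-add-requantize: each 16-byte load is widened u8 -> u16 -> u32, shifted by the zero point and scaled to float, and the float sums are converted back and narrowed with saturation. A sketch of the two recurring patterns, assuming an arm_neon.h target (the helper names are illustrative):

    #include <arm_neon.h>

    // Widen the lowest 4 of 16 u8 lanes to dequantized f32 lanes, as done four
    // times per load in add_QASYMM8_QASYMM8_QASYMM8 above.
    static inline float32x4_t dequantize_low4(uint8x16_t q, int32x4_t voffset, float32x4_t vscale)
    {
        const uint16x8_t u16 = vmovl_u8(vget_low_u8(q));                       // u8  -> u16
        const uint32x4_t u32 = vmovl_u16(vget_low_u16(u16));                   // u16 -> u32
        const int32x4_t  s32 = vsubq_s32(vreinterpretq_s32_u32(u32), voffset); // subtract zero point
        return vmulq_f32(vcvtq_f32_s32(s32), vscale);                          // convert and scale
    }

    // Narrow eight requantized s32 lanes back to u8, saturating to [0, 255]
    // (vqmovn_s32 then vqmovun_s16, exactly as in the kernel's tail).
    static inline uint8x8_t requantize_narrow8(int32x4_t lo, int32x4_t hi)
    {
        return vqmovun_s16(vcombine_s16(vqmovn_s32(lo), vqmovn_s32(hi)));
    }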
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index 4dc56dce54..e66a442d9b 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -43,6 +43,8 @@ namespace validation
{
namespace
{
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+
/** Input data sets **/
const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)), framework::dataset::make("DataType",
DataType::U8));
@@ -54,6 +56,8 @@ const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::m
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto ArithmeticAdditionQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("DataType", DataType::QASYMM8));
} // namespace
TEST_SUITE(NEON)
@@ -91,6 +95,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// clang-format on
// *INDENT-ON*
+TEST_SUITE(Integer)
TEST_SUITE(U8)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
shape, policy)
@@ -121,7 +126,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<uint8_t>, framework
// Validate output
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END()
+TEST_SUITE_END() // U8
TEST_SUITE(S16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::U8, DataType::S16 })),
@@ -161,7 +166,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<int16_t>, framework
// Validate output
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END()
+TEST_SUITE_END() // S16
+TEST_SUITE_END() // Integer
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -172,8 +178,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<half>, framework::D
// Validate output
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END()
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+TEST_SUITE_END() // F16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(F32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
@@ -231,11 +237,54 @@ FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEArithmeticAdditionBroadcastFixture<f
// Validate output
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // F32
+TEST_SUITE_END() // Float
+
+template <typename T>
+using NEArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantizedFixture<Tensor, Accessor, NEArithmeticAddition, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ shape, policy)
+{
+ // Create tensors
+ Tensor ref_src1 = create_tensor<Tensor>(shape, DataType::QASYMM8);
+ Tensor ref_src2 = create_tensor<Tensor>(shape, DataType::QASYMM8);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8);
+
+ // Create and Configure function
+ NEArithmeticAddition add;
+ add.configure(&ref_src1, &ref_src2, &dst, policy);
+
+ // Validate valid region
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(dst.info()->valid_region(), valid_region);
+
+ // Validate padding
+ const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ validate(ref_src1.info()->padding(), padding);
+ validate(ref_src2.info()->padding(), padding);
+ validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEArithmeticAdditionQuantizedFixture<uint8_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // ArithmeticAddition
+TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute
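
A closing note on the quantized tests: tolerance_qasymm8(1) is plausible because the NEON path requantizes with a truncating vcvtq_s32_f32 while the reference path may round to nearest, so correct outputs can sit one quantized step apart. A tiny illustration of that off-by-one (the values are hypothetical, and the reference rounding mode is an assumption here):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float sum    = 0.108f;      // hypothetical real-domain sum
        const float scale  = 1.f / 255.f; // output scale as in the test dataset
        const int   offset = 5;           // output offset as in the test dataset
        const int q_trunc = static_cast<int>(offset + sum / scale);              // truncate (NEON convert)
        const int q_round = static_cast<int>(std::lround(offset + sum / scale)); // round to nearest
        std::printf("%d vs %d\n", q_trunc, q_round);                             // prints "32 vs 33"
    }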