-rw-r--r--  arm_compute/core/NEON/NEFixedPoint.inl                     | 60
-rw-r--r--  src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp  | 52
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp          | 28
3 files changed, 108 insertions(+), 32 deletions(-)
diff --git a/arm_compute/core/NEON/NEFixedPoint.inl b/arm_compute/core/NEON/NEFixedPoint.inl
index 05e481561d..7cebfad924 100644
--- a/arm_compute/core/NEON/NEFixedPoint.inl
+++ b/arm_compute/core/NEON/NEFixedPoint.inl
@@ -1050,7 +1050,8 @@ inline qint8x8_t vrecip_qs8(qint8x8_t a, int fixed_point_position)
const qint8x8_t shift_value = vneg_s8(vsub_s8(vdup_n_s8(8), vadd_s8(vclz_s8(a), vdup_n_s8(fixed_point_position))));
const qint8x8_t temp = vshl_s8(a, shift_value);
- qint8x8_t x = vadd_s8(const_48_over_17, vmul_qs8(temp, const_32_over_17, fixed_point_position));
+ // Newton-Raphson division initial estimate X0 calculation
+ qint8x8_t x = vsub_s8(const_48_over_17, vmul_qs8(temp, const_32_over_17, fixed_point_position));
uint8x8_t set_one = vcgt_s8(x, const_one);
x = vbsl_s8(set_one, const_one, x);
@@ -1074,7 +1075,8 @@ inline qint16x4_t vrecip_qs16(qint16x4_t a, int fixed_point_position)
const qint16x4_t shift_value = vneg_s16(vsub_s16(vdup_n_s16(8), vadd_s16(vclz_s16(a), vdup_n_s16(fixed_point_position))));
const qint16x4_t temp = vshl_s16(a, shift_value);
- qint16x4_t x = vadd_s16(const_48_over_17, vmul_qs16(temp, const_32_over_17, fixed_point_position));
+ // Newton-Raphson division initial estimate X0 calculation
+ qint16x4_t x = vsub_s16(const_48_over_17, vmul_qs16(temp, const_32_over_17, fixed_point_position));
uint16x4_t set_one = vcgt_s16(x, const_one);
x = vbsl_s16(set_one, const_one, x);
@@ -1097,10 +1099,11 @@ inline qint8x8_t vqrecip_qs8(qint8x8_t a, int fixed_point_position)
const qint8x8_t const_one = vdup_n_s8(1 << fixed_point_position);
// Find shift value
- const qint8x8_t shift_value = vqneg_s8(vsub_s8(vdup_n_s8(8), vqadd_s8(vclz_s8(a), vdup_n_s8(fixed_point_position))));
+ const qint8x8_t shift_value = vqneg_s8(vqsub_s8(vdup_n_s8(8), vqadd_s8(vclz_s8(a), vdup_n_s8(fixed_point_position))));
const qint8x8_t temp = vqshl_s8(a, shift_value);
- qint8x8_t x = vqadd_s8(const_48_over_17, vqmul_qs8(temp, const_32_over_17, fixed_point_position));
+ // Newton-Raphson division initial estimate X0 calculation
+ qint8x8_t x = vqsub_s8(const_48_over_17, vqmul_qs8(temp, const_32_over_17, fixed_point_position));
uint8x8_t set_one = vcgt_s8(x, const_one);
x = vbsl_s8(set_one, const_one, x);
@@ -1124,17 +1127,18 @@ inline qint16x4_t vqrecip_qs16(qint16x4_t a, int fixed_point_position)
const qint16x4_t shift_value = vqneg_s16(vqsub_s16(vdup_n_s16(8), vqadd_s16(vclz_s16(a), vdup_n_s16(fixed_point_position))));
const qint16x4_t temp = vqshl_s16(a, shift_value);
- qint16x4_t x = vqadd_s16(const_48_over_17, vqmul_qs16(temp, const_32_over_17, fixed_point_position));
+ // Newton-Raphson division initial estimate X0 calculation
+ qint16x4_t x = vqsub_s16(const_48_over_17, vqmul_qs16(temp, const_32_over_17, fixed_point_position));
uint16x4_t set_one = vcgt_s16(x, const_one);
x = vbsl_s16(set_one, const_one, x);
// Use five iterations of Newton-Raphson method to get the result
- x = vqadd_s16(x, vmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
- x = vqadd_s16(x, vmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
- x = vqadd_s16(x, vmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
- x = vqadd_s16(x, vmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
- x = vqadd_s16(x, vmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
+ x = vqadd_s16(x, vqmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
+ x = vqadd_s16(x, vqmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
+ x = vqadd_s16(x, vqmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
+ x = vqadd_s16(x, vqmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
+ x = vqadd_s16(x, vqmul_qs16(x, vqsub_s16(const_one, vqmul_qs16(temp, x, fixed_point_position)), fixed_point_position));
return vqshl_s16(x, shift_value);
}
@@ -1150,6 +1154,7 @@ inline qint8x16_t vrecipq_qs8(qint8x16_t a, int fixed_point_position)
const qint8x16_t shift_value = vnegq_s8(vsubq_s8(vdupq_n_s8(8), vaddq_s8(vclzq_s8(a), vdupq_n_s8(fixed_point_position))));
const qint8x16_t temp = vshlq_s8(a, shift_value);
+ // Newton-Raphson division initial estimate X0 calculation
qint8x16_t x = vsubq_qs8(const_48_over_17, vmulq_qs8(temp, const_32_over_17, fixed_point_position));
// Set initial guess to one if x > 1
@@ -1175,6 +1180,7 @@ inline qint16x8_t vrecipq_qs16(qint16x8_t a, int fixed_point_position)
const qint16x8_t shift_value = vnegq_s16(vsubq_s16(vdupq_n_s16(16), vaddq_s16(vclzq_s16(a), vdupq_n_s16(fixed_point_position))));
const qint16x8_t temp = vshlq_s16(a, shift_value);
+ // Newton-Raphson division initial estimate X0 calculation
qint16x8_t x = vsubq_qs16(const_48_over_17, vmulq_qs16(temp, const_32_over_17, fixed_point_position));
// Set initial guess to one if x > 1
@@ -1202,6 +1208,7 @@ inline qint8x16_t vqrecipq_qs8(qint8x16_t a, int fixed_point_position)
const qint8x16_t shift_value = vqnegq_s8(vqsubq_s8(vdupq_n_s8(8), vqaddq_s8(vclzq_s8(a), vdupq_n_s8(fixed_point_position))));
const qint8x16_t temp = vqshlq_s8(a, shift_value);
+ // Newton-Raphson division initial estimate X0 calculation
qint8x16_t x = vqsubq_qs8(const_48_over_17, vqmulq_qs8(temp, const_32_over_17, fixed_point_position));
// Set initial guess to one if x > 1
@@ -1227,6 +1234,7 @@ inline qint16x8_t vqrecipq_qs16(qint16x8_t a, int fixed_point_position)
const qint16x8_t shift_value = vqnegq_s16(vqsubq_s16(vdupq_n_s16(16), vqaddq_s16(vclzq_s16(a), vdupq_n_s16(fixed_point_position))));
const qint16x8_t temp = vqshlq_s16(a, shift_value);
+ // Newton-Raphson division initial estimate X0 calculation
qint16x8_t x = vqsubq_qs16(const_48_over_17, vqmulq_qs16(temp, const_32_over_17, fixed_point_position));
// Set initial guess to one if x > 1
@@ -1881,10 +1889,10 @@ inline qint8x8_t vqtanh_qs8(qint8x8_t a, int fixed_point_position)
const qint8x8_t const_one = vdup_n_s8(1 << fixed_point_position);
const qint8x8_t const_two = vdup_n_s8(2 << fixed_point_position);
- qint8x8_t exp2x = vqexp_qs8(vqmul_qs8(const_two, a, fixed_point_position), fixed_point_position);
- qint8x8_t num = vqsub_qs8(exp2x, const_one);
- qint8x8_t den = vqadd_qs8(exp2x, const_one);
- qint8x8_t tanh = vqmul_qs8(num, vqrecip_qs8(den, fixed_point_position), fixed_point_position);
+ const qint8x8_t exp2x = vqexp_qs8(vqmul_qs8(const_two, a, fixed_point_position), fixed_point_position);
+ const qint8x8_t num = vqsub_qs8(exp2x, const_one);
+ const qint8x8_t den = vqadd_qs8(exp2x, const_one);
+ const qint8x8_t tanh = vqmul_qs8(num, vqrecip_qs8(den, fixed_point_position), fixed_point_position);
return tanh;
}
@@ -1894,10 +1902,10 @@ inline qint16x4_t vqtanh_qs16(qint16x4_t a, int fixed_point_position)
const qint16x4_t const_one = vdup_n_s16(1 << fixed_point_position);
const qint16x4_t const_two = vdup_n_s16(2 << fixed_point_position);
- qint16x4_t exp2x = vqexp_qs16(vqmul_qs16(const_two, a, fixed_point_position), fixed_point_position);
- qint16x4_t num = vqsub_qs16(exp2x, const_one);
- qint16x4_t den = vqadd_qs16(exp2x, const_one);
- qint16x4_t tanh = vqmul_qs16(num, vqrecip_qs16(den, fixed_point_position), fixed_point_position);
+ const qint16x4_t exp2x = vqexp_qs16(vqmul_qs16(const_two, a, fixed_point_position), fixed_point_position);
+ const qint16x4_t num = vqsub_qs16(exp2x, const_one);
+ const qint16x4_t den = vqadd_qs16(exp2x, const_one);
+ const qint16x4_t tanh = vqmul_qs16(num, vqrecip_qs16(den, fixed_point_position), fixed_point_position);
return tanh;
}
@@ -1907,10 +1915,10 @@ inline qint8x16_t vqtanhq_qs8(qint8x16_t a, int fixed_point_position)
const qint8x16_t const_one = vdupq_n_s8(1 << fixed_point_position);
const qint8x16_t const_two = vdupq_n_s8(2 << fixed_point_position);
- qint8x16_t exp2x = vqexpq_qs8(vqmulq_qs8(const_two, a, fixed_point_position), fixed_point_position);
- qint8x16_t num = vqsubq_qs8(exp2x, const_one);
- qint8x16_t den = vqaddq_qs8(exp2x, const_one);
- qint8x16_t tanh = vqmulq_qs8(num, vqrecipq_qs8(den, fixed_point_position), fixed_point_position);
+ const qint8x16_t exp2x = vqexpq_qs8(vqmulq_qs8(const_two, a, fixed_point_position), fixed_point_position);
+ const qint8x16_t num = vqsubq_qs8(exp2x, const_one);
+ const qint8x16_t den = vqaddq_qs8(exp2x, const_one);
+ const qint8x16_t tanh = vqmulq_qs8(num, vqrecipq_qs8(den, fixed_point_position), fixed_point_position);
return tanh;
}
@@ -1920,10 +1928,10 @@ inline qint16x8_t vqtanhq_qs16(qint16x8_t a, int fixed_point_position)
const qint16x8_t const_one = vdupq_n_s16(1 << fixed_point_position);
const qint16x8_t const_two = vdupq_n_s16(2 << fixed_point_position);
- qint16x8_t exp2x = vqexpq_qs16(vqmulq_qs16(const_two, a, fixed_point_position), fixed_point_position);
- qint16x8_t num = vqsubq_qs16(exp2x, const_one);
- qint16x8_t den = vqaddq_qs16(exp2x, const_one);
- qint16x8_t tanh = vqmulq_qs16(num, vqrecipq_qs16(den, fixed_point_position), fixed_point_position);
+ const qint16x8_t exp2x = vqexpq_qs16(vqmulq_qs16(const_two, a, fixed_point_position), fixed_point_position);
+ const qint16x8_t num = vqsubq_qs16(exp2x, const_one);
+ const qint16x8_t den = vqaddq_qs16(exp2x, const_one);
+ const qint16x8_t tanh = vqmulq_qs16(num, vqrecipq_qs16(den, fixed_point_position), fixed_point_position);
return tanh;
}
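
Note: the hunks above flip the Newton-Raphson initial estimate from an add to a subtract, X0 = 48/17 - 32/17 * d for d normalised into [0.5, 1), and switch non-saturating helpers to their saturating counterparts. A minimal scalar sketch of the same scheme, in plain float rather than the library's QS8/QS16 types, with frexp standing in for the vclz-based shift (illustrative names, positive inputs only):

    #include <cmath>
    #include <cstdio>

    // Newton-Raphson reciprocal, scalar float sketch of the vectorised routines above.
    float recip_newton_raphson(float a, int iterations)
    {
        // Normalise a into [0.5, 1): a = d * 2^e (stands in for the vclz/vshl shift).
        int e = 0;
        const float d = std::frexp(a, &e);

        // Initial estimate X0 = 48/17 - 32/17 * d (the subtraction restored by this patch).
        float x = 48.0f / 17.0f - (32.0f / 17.0f) * d;

        // Same update as the intrinsics: x = x + x * (1 - d * x).
        for(int i = 0; i < iterations; ++i)
        {
            x = x + x * (1.0f - d * x);
        }

        // Undo the normalisation: 1/a = (1/d) * 2^-e.
        return std::ldexp(x, -e);
    }

    int main()
    {
        std::printf("1/3 ~= %f\n", recip_newton_raphson(3.0f, 3)); // ~0.333333
    }
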
diff --git a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
index d0aec6965c..d1adfa7aec 100644
--- a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
@@ -47,7 +47,7 @@ void batch_normalization_q8(const ITensor *in, ITensor *out, const ITensor *mean
// Only compute denominator and NEON vectors once per feature map.
int slice = -1;
- int fixed_point_position = in->info()->fixed_point_position();
+ const int fixed_point_position = in->info()->fixed_point_position();
const auto input_mean = reinterpret_cast<const qint8_t *>(mean->ptr_to_element(Coordinates(0, 0)));
const auto input_var = reinterpret_cast<const qint8_t *>(var->ptr_to_element(Coordinates(0, 0)));
const auto input_gamma = reinterpret_cast<const qint8_t *>(gamma->ptr_to_element(Coordinates(0, 0)));
@@ -82,6 +82,50 @@ void batch_normalization_q8(const ITensor *in, ITensor *out, const ITensor *mean
input, output);
}
+void batch_normalization_q16(const ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+{
+ Iterator input(in, window);
+ Iterator output(out, window);
+
+ // Hold information about the current feature map we are iterating.
+ // Only compute denominator and NEON vectors once per feature map.
+ int slice = -1;
+
+ const int fixed_point_position = in->info()->fixed_point_position();
+ const auto input_mean = reinterpret_cast<const qint16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const qint16_t *>(var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = reinterpret_cast<const qint16_t *>(gamma->ptr_to_element(Coordinates(0, 0)));
+ const auto input_beta = reinterpret_cast<const qint16_t *>(beta->ptr_to_element(Coordinates(0, 0)));
+
+ qint16x8_t mean_vec = vdupq_n_qs16(0);
+ qint16x8_t var_vec = vdupq_n_qs16(0);
+ qint16x8_t gamma_vec = vdupq_n_qs16(0);
+ qint16x8_t beta_vec = vdupq_n_qs16(0);
+ qint16x8_t denominator = vdupq_n_qs16(0);
+ const qint16x8_t epsilon_vec = vdupq_n_qs16(sqcvt_qs16_f32(epsilon, fixed_point_position));
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ if(slice != id.z())
+ {
+ // Construct vectors
+ mean_vec = vdupq_n_qs16(*(input_mean + id.z()));
+ var_vec = vdupq_n_qs16(*(input_var + id.z()));
+ gamma_vec = vdupq_n_qs16(*(input_gamma + id.z()));
+ beta_vec = vdupq_n_qs16(*(input_beta + id.z()));
+
+ // Calculate denominator
+ denominator = vqinvsqrtq_qs16(vqaddq_qs16(var_vec, epsilon_vec), fixed_point_position);
+ slice = id.z();
+ }
+
+ // Calculate x bar and store results
+ const qint16x8_t numerator = vqsubq_qs16(vld1q_qs16(reinterpret_cast<const qint16_t *>(input.ptr())), mean_vec);
+ const qint16x8_t x_bar = vqmulq_qs16(numerator, denominator, fixed_point_position);
+ vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqmlaq_qs16(beta_vec, x_bar, gamma_vec, fixed_point_position));
+ },
+ input, output);
+}
+
void batch_normalization_fp32(const ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
{
Iterator input(in, window);
@@ -127,7 +171,7 @@ void batch_normalization_fp32(const ITensor *in, ITensor *out, const ITensor *me
void NEBatchNormalizationLayerKernel::configure(const ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
// Output tensor auto initialization if not yet initialized
@@ -155,6 +199,10 @@ void NEBatchNormalizationLayerKernel::configure(const ITensor *input, ITensor *o
_func = &batch_normalization_q8;
num_elems_processed_per_iteration = 16;
break;
+ case DataType::QS16:
+ _func = &batch_normalization_q16;
+ num_elems_processed_per_iteration = 8;
+ break;
case DataType::F32:
_func = &batch_normalization_fp32;
num_elems_processed_per_iteration = 4;
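
Note: the new batch_normalization_q16 kernel follows the same per-element formula as the existing QS8 and F32 paths. A scalar float sketch of that computation, with illustrative names that are not part of the library's API:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Scalar sketch: out = gamma * (x - mean) / sqrt(var + epsilon) + beta,
    // with the denominator computed once per feature map, as in the kernel above.
    void batch_normalization_ref(const std::vector<float> &in, std::vector<float> &out,
                                 float mean, float var, float beta, float gamma, float epsilon)
    {
        const float denominator = 1.0f / std::sqrt(var + epsilon); // cf. vqinvsqrtq_qs16

        for(std::size_t i = 0; i < in.size(); ++i)
        {
            const float x_bar = (in[i] - mean) * denominator; // cf. vqsubq_qs16 / vqmulq_qs16
            out[i]            = gamma * x_bar + beta;         // cf. vqmlaq_qs16
        }
    }
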
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index d825c889b6..9d72996940 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -42,8 +42,9 @@ using namespace arm_compute::test::validation;
namespace
{
-const float tolerance_f = 1e-05; /**< Tolerance value for comparing reference's output against floating point implementation's output */
-const float tolerance_q = 3; /**< Tolerance value for comparing reference's output against quantized implementation's output */
+const float tolerance_f = 1e-05; /**< Tolerance value for comparing reference's output against floating point implementation's output */
+const float tolerance_qs8 = 6; /**< Tolerance value for comparing reference's output against quantized implementation's output */
+const float tolerance_qs16 = 6; /**< Tolerance value for comparing reference's output against quantized implementation's output */
/** Compute Neon batch normalization function.
*
@@ -129,7 +130,7 @@ BOOST_AUTO_TEST_SUITE(NEON)
BOOST_AUTO_TEST_SUITE(BatchNormalizationLayer)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, RandomBatchNormalizationLayerDataset() * (boost::unit_test::data::make(DataType::F32) + boost::unit_test::data::make(DataType::QS8)), obj, dt)
+BOOST_DATA_TEST_CASE(Configuration, RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make({ DataType::QS8, DataType::QS16, DataType::F32 }), obj, dt)
{
// Set fixed point position data type allowed
int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -182,6 +183,7 @@ BOOST_DATA_TEST_CASE(Random,
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
BOOST_DATA_TEST_CASE(Random,
RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(1, 6),
@@ -194,10 +196,28 @@ BOOST_DATA_TEST_CASE(Random,
RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
// Validate output
- validate(NEAccessor(dst), ref_dst, tolerance_q, 0);
+ validate(NEAccessor(dst), ref_dst, tolerance_qs8);
}
BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(Random,
+ RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS16) * boost::unit_test::data::xrange(1, 14),
+ obj, dt, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
+
+ // Validate output
+ validate(NEAccessor(dst), ref_dst, tolerance_qs16);
+}
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
#endif /* DOXYGEN_SKIP_THIS */
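
Note on the quantized tolerances and the fixed_point_position ranges used above: one fixed-point step represents 2^-fixed_point_position, so an integer tolerance of N corresponds to an absolute error of N * 2^-fixed_point_position in real terms. A hedged sketch of a float-to-QS16 conversion (illustrative only, not the library's sqcvt_qs16_f32):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Convert a float to a QS16 value for a given fixed_point_position.
    int16_t float_to_qs16(float value, int fixed_point_position)
    {
        const float scaled  = value * static_cast<float>(1 << fixed_point_position);
        const float clamped = std::fmax(std::fmin(scaled, 32767.0f), -32768.0f);
        return static_cast<int16_t>(std::lround(clamped));
    }

    int main()
    {
        // With fixed_point_position = 3, 1.5f maps to 12; a tolerance of 6 is 0.75 in real terms.
        std::printf("%d\n", float_to_qs16(1.5f, 3));
    }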