author     giuros01 <giuseppe.rossini@arm.com>  2019-07-24 16:29:53 +0100
committer  Giuseppe Rossini <giuseppe.rossini@arm.com>  2019-07-24 21:21:50 +0000
commit     b5e75dbc1543ad7f549bf3351ac753614fca25e4 (patch)
tree       c8cc96219f427571ade5dfc4d08c0c583331d697
parent     7faa0ec6d74883f29924efaf86c15a9634fdec64 (diff)
download   ComputeLibrary-b5e75dbc1543ad7f549bf3351ac753614fca25e4.tar.gz
COMPMID-2491: (Nightly) Bug in NEON LSTMLayerQuantized
Bug fix: We had to introduce a +1/-1 tolerance for 32-bit. This is already done for other layers on NEON (e.g., Activation).

Change-Id: I449cd3c394ccdb46517f26ceff5d1254734a476a
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1607
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: VidhyaSudhan Loganathan <vidhyasudhan.loganathan@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  tests/validation/NEON/LSTMLayerQuantized.cpp  17
1 file changed, 12 insertions(+), 5 deletions(-)
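For context, the sketch below illustrates what an element-wise absolute-tolerance comparison like the one this patch enables amounts to. It is a minimal standalone example, not the ComputeLibrary test framework's actual validate()/AbsoluteTolerance implementation; the helper name within_absolute_tolerance is hypothetical.

#include <cstdint>
#include <cstdlib> // std::abs(int)
#include <vector>

// Platform-dependent absolute tolerance, mirroring the #if in the patch:
// exact match expected on aarch64, +/-1 allowed elsewhere (32-bit Arm).
#if defined(__aarch64__)
constexpr int tolerance = 0;
#else
constexpr int tolerance = 1;
#endif

// Returns true if every element of 'actual' is within 'tol' of 'expected'.
bool within_absolute_tolerance(const std::vector<uint8_t> &actual,
                               const std::vector<uint8_t> &expected,
                               int tol)
{
    if(actual.size() != expected.size())
    {
        return false;
    }
    for(std::size_t i = 0; i < actual.size(); ++i)
    {
        if(std::abs(static_cast<int>(actual[i]) - static_cast<int>(expected[i])) > tol)
        {
            return false;
        }
    }
    return true;
}

With a tolerance of 1, an output element of 131 still passes against an expected value of 132, which is exactly the kind of off-by-one difference the 32-bit build produces for this layer.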
diff --git a/tests/validation/NEON/LSTMLayerQuantized.cpp b/tests/validation/NEON/LSTMLayerQuantized.cpp
index d5d036de33..0935165564 100644
--- a/tests/validation/NEON/LSTMLayerQuantized.cpp
+++ b/tests/validation/NEON/LSTMLayerQuantized.cpp
@@ -63,6 +63,13 @@ inline void fill_tensor(SimpleTensor<T> &tensor, const std::vector<T> &v)
std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
}
+/** Tolerance for quantized asymmetric operations */
+#if defined(__aarch64__)
+constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(0);
+#else // defined(__aarch64__)
+constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
+#endif // defined(__aarch64__)
+
} // namespace
TEST_SUITE(NEON)
@@ -194,19 +201,19 @@ TEST_CASE(IntegrationTestCaseSmall, framework::DatasetMode::PRECOMMIT)
128, 131, 35, 133 });
lstmq.run();
- validate(Accessor(output_state), expected_output);
+ validate(Accessor(output_state), expected_output, tolerance_qsymm16);
// Second input
fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 12, 137,
128, 131, 10, 136 });
lstmq.run();
- validate(Accessor(output_state), expected_output);
+ validate(Accessor(output_state), expected_output, tolerance_qsymm16);
// Third input
fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 8, 140,
128, 130, 6, 138 });
lstmq.run();
- validate(Accessor(output_state), expected_output);
+ validate(Accessor(output_state), expected_output, tolerance_qsymm16);
}
TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
@@ -424,7 +431,7 @@ TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
140, 128, 128, 128, 128, 133, 132, 128 });
lstmq.run();
- validate(Accessor(output_state), expected_output);
+ validate(Accessor(output_state), expected_output, tolerance_qsymm16);
// Second input
fill_tensor(expected_output, std::vector<uint8_t> { 130, 128, 128, 128, 128, 205, 129, 137,
@@ -444,7 +451,7 @@ TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
129, 128, 128, 128, 128, 171, 134, 129,
140, 128, 128, 128, 128, 135, 132, 129});
lstmq.run();
- validate(Accessor(output_state), expected_output);
+ validate(Accessor(output_state), expected_output, tolerance_qsymm16);
}
// clang-format on
// *INDENT-ON*