From b5e75dbc1543ad7f549bf3351ac753614fca25e4 Mon Sep 17 00:00:00 2001
From: giuros01
Date: Wed, 24 Jul 2019 16:29:53 +0100
Subject: COMPMID-2491: (Nightly) Bug in NEON LSTMLayerQuantized

Bug fix: We had to introduce a +1/-1 tolerance for 32-bit (non-aarch64)
targets. This is already done for other layers on NEON (e.g., Activation).

Change-Id: I449cd3c394ccdb46517f26ceff5d1254734a476a
Signed-off-by: giuros01
Reviewed-on: https://review.mlplatform.org/c/1607
Comments-Addressed: Arm Jenkins
Reviewed-by: VidhyaSudhan Loganathan
Tested-by: Arm Jenkins
---
 tests/validation/NEON/LSTMLayerQuantized.cpp | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/tests/validation/NEON/LSTMLayerQuantized.cpp b/tests/validation/NEON/LSTMLayerQuantized.cpp
index d5d036de33..0935165564 100644
--- a/tests/validation/NEON/LSTMLayerQuantized.cpp
+++ b/tests/validation/NEON/LSTMLayerQuantized.cpp
@@ -63,6 +63,13 @@ inline void fill_tensor(SimpleTensor<T> &tensor, const std::vector<T> &v)
     std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
 }
 
+/** Tolerance for quantized asymmetric operations */
+#if defined(__aarch64__)
+constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(0);
+#else  // defined(__aarch64__)
+constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
+#endif // defined(__aarch64__)
+
 } // namespace
 
 TEST_SUITE(NEON)
@@ -194,19 +201,19 @@ TEST_CASE(IntegrationTestCaseSmall, framework::DatasetMode::PRECOMMIT)
                                                         128, 131, 35, 133 });
 
     lstmq.run();
-    validate(Accessor(output_state), expected_output);
+    validate(Accessor(output_state), expected_output, tolerance_qsymm16);
 
     // Second input
     fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 12, 137,
                                                         128, 131, 10, 136 });
     lstmq.run();
-    validate(Accessor(output_state), expected_output);
+    validate(Accessor(output_state), expected_output, tolerance_qsymm16);
 
     // Third input
     fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 8, 140,
                                                         128, 130, 6, 138 });
     lstmq.run();
-    validate(Accessor(output_state), expected_output);
+    validate(Accessor(output_state), expected_output, tolerance_qsymm16);
 }
 
 TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
@@ -424,7 +431,7 @@ TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
                                                         140, 128, 128, 128, 128, 133, 132, 128 });
 
     lstmq.run();
-    validate(Accessor(output_state), expected_output);
+    validate(Accessor(output_state), expected_output, tolerance_qsymm16);
 
     // Second input
     fill_tensor(expected_output, std::vector<uint8_t> { 130, 128, 128, 128, 128, 205, 129, 137,
@@ -444,7 +451,7 @@ TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
                                                         129, 128, 128, 128, 128, 171, 134, 129,
                                                         140, 128, 128, 128, 128, 135, 132, 129});
     lstmq.run();
-    validate(Accessor(output_state), expected_output);
+    validate(Accessor(output_state), expected_output, tolerance_qsymm16);
 }
 // clang-format on
 // *INDENT-ON*
-- 
cgit v1.2.1
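
Note (not part of the patch): the change above only affects which tolerance the test passes to validate(). As a standalone illustration of what an absolute tolerance of +/-1 means for the quantized (QASYMM8) output on 32-bit builds, the self-contained C++ sketch below compares an output buffer against the expected values element by element. It does not use the Compute Library test framework; the helper name check_within_tolerance and the "actual" values are hypothetical.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <vector>

// Returns true if every element of 'actual' is within 'abs_tolerance'
// of the corresponding element of 'expected'.
bool check_within_tolerance(const std::vector<uint8_t> &actual,
                            const std::vector<uint8_t> &expected,
                            int abs_tolerance)
{
    if(actual.size() != expected.size())
    {
        return false;
    }
    for(std::size_t i = 0; i < actual.size(); ++i)
    {
        // Compare in a signed domain so the uint8_t subtraction cannot wrap around
        const int diff = std::abs(static_cast<int>(actual[i]) - static_cast<int>(expected[i]));
        if(diff > abs_tolerance)
        {
            return false;
        }
    }
    return true;
}

int main()
{
    // Expected values taken from the small integration test's first run
    const std::vector<uint8_t> expected = { 128, 130, 36, 134, 128, 131, 35, 133 };
    // A hypothetical 32-bit result that is off by one in two positions
    const std::vector<uint8_t> actual   = { 128, 129, 36, 134, 128, 131, 36, 133 };

    std::cout << std::boolalpha;
    // Zero tolerance (the aarch64 setting) rejects the off-by-one result...
    std::cout << "tolerance 0: " << check_within_tolerance(actual, expected, 0) << '\n';
    // ...while the +/-1 tolerance introduced for 32-bit targets accepts it.
    std::cout << "tolerance 1: " << check_within_tolerance(actual, expected, 1) << '\n';
    return 0;
}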