Diffstat (limited to 'tests/validation/NEON/QuantizationLayer.cpp')
-rw-r--r--  tests/validation/NEON/QuantizationLayer.cpp | 59
1 file changed, 28 insertions(+), 31 deletions(-)
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index a5372b897c..bab7490762 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,6 +34,7 @@
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/QuantizationLayerFixture.h"
+
namespace arm_compute
{
namespace test
@@ -74,29 +75,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationSmallShapes, framework::dataset::make("DataType", DataType::F32)), shape, data_type)
-{
- // Create tensors
- Tensor src = create_tensor<Tensor>(shape, data_type);
- Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- NEQuantizationLayer quant_layer;
- quant_layer.configure(&src, &dst);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(src.info()->valid_region(), valid_region);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- validate(src.info()->padding(), PaddingSize());
- validate(dst.info()->padding(), PaddingSize());
-}
-
template <typename T>
using NEQuantizationLayerQASYMM8Fixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T, uint8_t>;
template <typename T>
@@ -120,7 +98,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8Signed, NEQuantizationLayerQASYMM8SignedFi
framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_u8);
+ validate(Accessor(_target), _reference, tolerance_s8);
}
FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, NEQuantizationLayerQASYMM16Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
framework::dataset::make("DataType", DataType::F32)),
@@ -163,7 +141,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8Signed, NEQuantizationLayerQASYMM8SignedFi
framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_u8);
+ validate(Accessor(_target), _reference, tolerance_s8);
}
FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, NEQuantizationLayerQASYMM16Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
framework::dataset::make("DataType", DataType::F16)),
@@ -205,7 +183,16 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8GenFixture<uin
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(0.5f, 10) })),
- framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15) })))
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15), QuantizationInfo(0.5f, 25) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u8);
+}
+FIXTURE_DATA_TEST_CASE(ConvertUint8toInt8, NEQuantizationLayerQASYMM8GenFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+ framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(2.0f, -1) })),
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 127) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
@@ -214,7 +201,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, NEQuantizationLayerQASYMM8_SIGNED
framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10), QuantizationInfo(2.0f, -25) })),
- framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15) })))
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15), QuantizationInfo(1.0f, 127) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_s8);
@@ -234,7 +221,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, NEQuantizationLayerQASYMM8_SIGNED
framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
- framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, -5) })))
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, -5), QuantizationInfo(1.0f, 43) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_s8);
@@ -243,16 +230,26 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8GenFixture<int
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(2.0f, 10), QuantizationInfo(2.0f, -25) })),
- framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 30) })))
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 30), QuantizationInfo(2.0f, -128) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(ConvertInt8toUint8, NEQuantizationLayerQASYMM8_SIGNEDGenFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 0) })),
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, -128) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_s8);
+}
+
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // QuantizationLayer
-TEST_SUITE_END() // NEON
+TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
} // namespace arm_compute
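
For context (not part of the patch above), a minimal standalone sketch of the call sequence the new ConvertUint8toInt8 case exercises: NEQuantizationLayer re-quantizing a QASYMM8 tensor into a QASYMM8_SIGNED one. The configure()/run() usage follows the Configuration test removed by this patch, and the quantization parameters match the new test case; the 32x32 shape and the main() scaffolding are illustrative assumptions only.

#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative 32x32 shape; any shape supported by the layer works the same way.
    const TensorShape shape(32U, 32U);

    Tensor src;
    Tensor dst;

    // Input: QASYMM8 with scale 2.0 and zero point 127 (QuantizationInfoInput of ConvertUint8toInt8).
    src.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, QuantizationInfo(2.0f, 127)));
    // Output: QASYMM8_SIGNED with scale 2.0 and zero point -1 (QuantizationInfoOutput of ConvertUint8toInt8).
    dst.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8_SIGNED, QuantizationInfo(2.0f, -1)));

    // Configure the function before allocating backing memory.
    NEQuantizationLayer quant;
    quant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with quantized input data here ...

    quant.run(); // dst now holds the re-quantized QASYMM8_SIGNED values
    return 0;
}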