diff options
author | Michele Di Giorgio <michele.digiorgio@arm.com> | 2020-06-16 16:21:00 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2020-06-25 13:14:23 +0000 |
commit | c41a6a611973cb245220641e06f8fa984b156954 (patch) | |
tree | b47b9b517d839f97bd7a489e3fae81037753b014 /tests/validation/CL/ArithmeticAddition.cpp | |
parent | 173ba9bbb19ea83f951318d9989e440768b4de8f (diff) | |
download | ComputeLibrary-c41a6a611973cb245220641e06f8fa984b156954.tar.gz |
COMPMID-3530: Fix validate calls in CLElementwiseOperationKernel
Change-Id: I315520ca825e2a420bb28308f020b95ff2969042
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3341
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests/validation/CL/ArithmeticAddition.cpp')
-rw-r--r-- | tests/validation/CL/ArithmeticAddition.cpp | 25 |
1 file changed, 25 insertions, 0 deletions
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp index 180b1aa3b0..3e2b192e13 100644 --- a/tests/validation/CL/ArithmeticAddition.cpp +++ b/tests/validation/CL/ArithmeticAddition.cpp @@ -102,6 +102,31 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( // clang-format on // *INDENT-ON* +/** Validate fused activation expecting the following behaviours: + * + * - Fused activation with float data type should succeed + * - Fused activation with quantized data type should fail + * + */ +TEST_CASE(FusedActivation, framework::DatasetMode::ALL) +{ + auto input = TensorInfo{ TensorShape(2U, 2U), 1, DataType::F32 }; + auto output = TensorInfo{ TensorShape(2U, 2U), 1, DataType::F32 }; + Status result{}; + + const auto act_info = ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU); + + // Fused-activation float type + result = CLArithmeticAddition::validate(&input, &input, &output, ConvertPolicy::WRAP, act_info); + ARM_COMPUTE_EXPECT(bool(result) == true, framework::LogLevel::ERRORS); + + // Fused-activation quantized type + input.set_data_type(DataType::QASYMM8); + output.set_data_type(DataType::QASYMM8); + result = CLArithmeticAddition::validate(&input, &input, &output, ConvertPolicy::WRAP, act_info); + ARM_COMPUTE_EXPECT(bool(result) == false, framework::LogLevel::ERRORS); +} + template <typename T> using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>; |