From 631c41a4e3645a948b0f597caa77e8fa91ca0efc Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 6 Dec 2017 11:53:03 +0000
Subject: COMPMID-556: Rename Error to Status and inverse logic

Change-Id: Ib57d4f7177cc6179302bda7ad870acb8bd3825f5
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112115
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com
Reviewed-by: Anthony Barbier
---
 tests/validation/CL/ActivationLayer.cpp           |  2 +-
 tests/validation/CL/ArithmeticAddition.cpp        |  2 +-
 tests/validation/CL/ArithmeticSubtraction.cpp     |  2 +-
 tests/validation/CL/BatchNormalizationLayer.cpp   |  2 +-
 tests/validation/CL/DirectConvolutionLayer.cpp    |  6 +++---
 tests/validation/CL/NormalizationLayer.cpp        |  2 +-
 tests/validation/CL/PixelWiseMultiplication.cpp   |  2 +-
 tests/validation/CL/PoolingLayer.cpp              |  2 +-
 tests/validation/CL/SoftmaxLayer.cpp              |  2 +-
 tests/validation/NEON/ArithmeticAddition.cpp      |  2 +-
 tests/validation/NEON/ArithmeticSubtraction.cpp   |  2 +-
 tests/validation/NEON/BatchNormalizationLayer.cpp |  2 +-
 tests/validation/NEON/Col2Im.cpp                  |  2 +-
 tests/validation/NEON/GEMMLowp.cpp                | 10 +++++-----
 tests/validation/NEON/Im2Col.cpp                  |  2 +-
 tests/validation/NEON/PixelWiseMultiplication.cpp |  2 +-
 tests/validation/NEON/UNIT/TensorAllocator.cpp    |  8 ++++----
 17 files changed, 26 insertions(+), 26 deletions(-)

(limited to 'tests/validation')

diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 6ebbb57f87..d3f55d9959 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -189,7 +189,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
         ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
       })),
-    framework::dataset::make("Expected", { true, true, false, false, true, true, true, false, false })),
+    framework::dataset::make("Expected", { false, false, true, true, false, false, false, true, true })),
     input_info, output_info, act_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 61b3b06d74..787b1b986f 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -88,7 +88,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
       })),
-    framework::dataset::make("Expected", { false, false, true, true, true, true, false })),
+    framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
     input1_info, input2_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp
index 9a290cfe30..34fdb0b934 100644
--- a/tests/validation/CL/ArithmeticSubtraction.cpp
+++ b/tests/validation/CL/ArithmeticSubtraction.cpp
@@ -95,7 +95,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
       })),
-    framework::dataset::make("Expected", { false, false, true, true, true, true, false })),
+    framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
     input1_info, input2_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index 4976c1c1a6..30dd70a66a 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -108,7 +108,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
         TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
       })),
-    framework::dataset::make("Expected", { false, true, true, true, true, true, false, false})),
+    framework::dataset::make("Expected", { true, false, false, false, false, false, true, true})),
     input_info, output_info, mvbg_info, expected)
 {
     const auto &mean_info = mvbg_info;
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index e2e1b6abef..91b3c958d4 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -143,11 +143,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
         PadStrideInfo(1, 1, 0, 0),
         PadStrideInfo(1, 1, 0, 0),
       })),
-    framework::dataset::make("Expected", { true, true, true, true, true, true, true, true, true, true, false })),
+    framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false, false, true })),
     input_info, weights_info, biases_info, output_info, conv_info, expected)
 {
-    bool is_error = bool(CLDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info));
-    ARM_COMPUTE_EXPECT(is_error == expected, framework::LogLevel::ERRORS);
+    bool is_valid = bool(CLDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
 // *INDENT-ON*
diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp
index 0ed52840ae..74433c1884 100644
--- a/tests/validation/CL/NormalizationLayer.cpp
+++ b/tests/validation/CL/NormalizationLayer.cpp
@@ -99,7 +99,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
         NormalizationLayerInfo(NormType::CROSS_MAP, 5),
       })),
-    framework::dataset::make("Expected", { true, true, true, true, true, true, false })),
+    framework::dataset::make("Expected", { false, false, false, false, false, false, true })),
     input_info, output_info, norm_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/CL/PixelWiseMultiplication.cpp b/tests/validation/CL/PixelWiseMultiplication.cpp
index 7431a0983f..031f10f1cf 100644
--- a/tests/validation/CL/PixelWiseMultiplication.cpp
+++ b/tests/validation/CL/PixelWiseMultiplication.cpp
@@ -124,7 +124,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
       })),
     framework::dataset::make("Scale",{ 2.f, 2.f, 2.f, -1.f, 1.f, 1.f, 1.f, 1.f, 3.f})),
-    framework::dataset::make("Expected", { false, false, true, true, true, true, true, true, true })),
+    framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })),
     input1_info, input2_info, output_info, scale, expected)
 {
     bool has_error = bool(CLPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO));
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index de647a1b1e..ee639376c5 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -104,7 +104,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         PoolingLayerInfo(PoolingType::MAX),
         PoolingLayerInfo(PoolingType::AVG),
       })),
-    framework::dataset::make("Expected", { true, true, true, true, true, true, true, true, true, false })),
+    framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false, true })),
     input_info, output_info, pool_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index bd7072377a..46ce63ed75 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -135,7 +135,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 0)),
       })),
-    framework::dataset::make("Expected", { true, true, true, true, true, false, false, false })),
+    framework::dataset::make("Expected", { false, false, false, false, false, true, true, true })),
     input_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index 21a8c4b79f..e20e8df665 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -93,7 +93,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
       })),
-    framework::dataset::make("Expected", { false, false, true, true, true, true, false })),
+    framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
     input1_info, input2_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(NEArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index 1a31defb46..f5a50335db 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -97,7 +97,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
       })),
-    framework::dataset::make("Expected", { false, false, true, true, true, true, false })),
+    framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
     input1_info, input2_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(NEArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 806d3b3f20..dfa32bbb07 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -110,7 +110,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
         TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
       })),
-    framework::dataset::make("Expected", { false, true, true, true, true, true, false, false})),
+    framework::dataset::make("Expected", { true, false, false, false, false, false, true, true})),
     input_info, output_info, mvbg_info, expected)
 {
     const auto &mean_info = mvbg_info;
diff --git a/tests/validation/NEON/Col2Im.cpp b/tests/validation/NEON/Col2Im.cpp
index c835c27f18..9125dc2498 100644
--- a/tests/validation/NEON/Col2Im.cpp
+++ b/tests/validation/NEON/Col2Im.cpp
@@ -55,7 +55,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
       })),
     framework::dataset::make("ConvolvedWidth", { 3, 3, 3, 3, 3 })),
     framework::dataset::make("ConvolvedHeight", { 4, 4, 4, 4, 4 })),
-    framework::dataset::make("Expected", { true, true, true, true, false })),
+    framework::dataset::make("Expected", { false, false, false, false, true })),
     input_info, output_info, convolved_width, convolved_height, expected)
 {
     bool err = bool(NECol2Im::validate(&input_info, &output_info, Size2D(convolved_width, convolved_height)));
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index a49ca4670a..7616df9eaa 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -147,14 +147,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(8U, 11U), 1, DataType::S32),
         TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
       })),
-    framework::dataset::make("Expected", { true, true, true, true, false })),
+    framework::dataset::make("Expected", { false, false, false, false, true })),
     a_info, b_info, output_info, expected)
 {
     // Lock tensors
-    Error error = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
-                                                         &b_info.clone()->set_is_resizable(false),
-                                                         &output_info.clone()->set_is_resizable(false));
-    ARM_COMPUTE_EXPECT(bool(error) == expected, framework::LogLevel::ERRORS);
+    Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
+                                                           &b_info.clone()->set_is_resizable(false),
+                                                           &output_info.clone()->set_is_resizable(false));
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
 // *INDENT-ON*
diff --git a/tests/validation/NEON/Im2Col.cpp b/tests/validation/NEON/Im2Col.cpp
index b05b8daed1..4faa7d7d66 100644
--- a/tests/validation/NEON/Im2Col.cpp
+++ b/tests/validation/NEON/Im2Col.cpp
@@ -53,7 +53,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8),
      })),
     framework::dataset::make("HasBias", { true, true, true, true, false })),
-    framework::dataset::make("Expected", { true, true, true, true, false })),
+    framework::dataset::make("Expected", { false, false, false, false, true })),
     input_info, output_info, has_bias, expected)
 {
     bool err = bool(NEIm2Col::validate(&input_info, &output_info, Size2D(3U, 3U), PadStrideInfo(), has_bias));
diff --git a/tests/validation/NEON/PixelWiseMultiplication.cpp b/tests/validation/NEON/PixelWiseMultiplication.cpp
index ac0f768308..44b4ff289c 100644
--- a/tests/validation/NEON/PixelWiseMultiplication.cpp
+++ b/tests/validation/NEON/PixelWiseMultiplication.cpp
@@ -155,7 +155,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
       })),
     framework::dataset::make("Scale",{ scale_unity, scale_unity, scale_unity, -1.f, scale_unity, scale_unity, scale_unity, scale_unity, 3.f})),
-    framework::dataset::make("Expected", { false, false, true, true, true, true, true, true, true })),
+    framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })),
     input1_info, input2_info, output_info, scale, expected)
 {
     bool has_error = bool(NEPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO));
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index 069831880a..4732f3f088 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -53,20 +53,20 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
     // Negative case : Import empty memory
     Tensor t1;
     t1.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(bool(t1.allocator()->import_memory(Memory())), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(Memory())), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
 
     // Negative case : Import memory to a tensor that is memory managed
     Tensor t2;
     MemoryGroup mg;
     t2.allocator()->set_associated_memory_group(&mg);
-    ARM_COMPUTE_EXPECT(bool(t2.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
 
     // Positive case : Set raw pointer
     Tensor t3;
     t3.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(!t3.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t3.buffer() == buf.get(), framework::LogLevel::ERRORS);
     t3.allocator()->free();
@@ -76,7 +76,7 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
     // Positive case : Set managed pointer
     Tensor t4;
     t4.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(!bool(t4.allocator()->import_memory(Memory(buf))), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(Memory(buf))), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(t4.buffer() == buf.get(), framework::LogLevel::ERRORS);
    t4.allocator()->free();
--
cgit v1.2.1
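
Note on the inverted semantics the hunks above encode: before this change, validate() returned an Error whose bool conversion was true when validation failed, so the "Expected" datasets listed true for the broken configurations. After the rename to Status, the conversion is true when validation succeeds, which is why every "Expected" list is flipped and names such as is_error become is_valid. The self-contained sketch below illustrates that convention only; it is not the actual arm_compute definition, and the enum values and member names used here are assumptions made for illustration.

// Minimal stand-in for the Status class this patch switches the tests to
// (illustrative sketch, not the real arm_compute implementation).
#include <cassert>
#include <string>
#include <utility>

enum class ErrorCode
{
    OK,            // No error: the configuration is valid
    RUNTIME_ERROR  // Generic runtime error
};

class Status
{
public:
    Status() = default; // A default-constructed Status reports success
    Status(ErrorCode code, std::string description)
        : _code(code), _description(std::move(description))
    {
    }
    // Inverted logic: a Status converts to true when *no* error occurred.
    explicit operator bool() const
    {
        return _code == ErrorCode::OK;
    }
    ErrorCode          error_code() const { return _code; }
    const std::string &description() const { return _description; }

private:
    ErrorCode   _code{ ErrorCode::OK };
    std::string _description{};
};

// Toy validate() in the style of the functions exercised by the tests in this patch.
Status validate(bool inputs_are_compatible)
{
    if(!inputs_are_compatible)
    {
        return Status(ErrorCode::RUNTIME_ERROR, "Mismatching data types");
    }
    return Status{};
}

int main()
{
    // The test expectations now read "true means the configuration validates".
    assert(bool(validate(true)));
    assert(!bool(validate(false)));
    return 0;
}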