commit    c63c3703db0bf351fc8d17fc1c0d3721abb3cc80
tree      f441a18ef7b9ad71d3d4f9e872bece293469154a
parent    89890c630c438debe9661eeb444d81d6e2b3e1a5
author    Usama Arif <usama.arif@arm.com>          2019-03-21 11:49:25 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com> 2019-03-21 13:52:05 +0000
COMPMID-2074: Fix WidthConcatenate and HeightConcatenate validate tests
Change-Id: I14132883047d7df478e8e30917cb8bbaadd93be7
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/883
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
 tests/validation/CL/WidthConcatenateLayer.cpp    | 15
 tests/validation/NEON/HeightConcatenateLayer.cpp | 14
 tests/validation/NEON/WidthConcatenateLayer.cpp  | 13
 3 files changed, 27 insertions(+), 15 deletions(-)
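
Note (not part of the patch): the diff below switches each validate test to clone the output TensorInfo with set_is_resizable(true) and adds a fifth test case whose declared output shape can only be accepted when the output is resizable. The following is a minimal standalone sketch of that call pattern for the CL width case, using the shapes from the new test case; the header paths and the CLConcatenateLayer::validate() signature are assumed to match this revision of the library.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"

    #include <vector>

    using namespace arm_compute;

    int main()
    {
        // Two inputs concatenated along the width dimension (shapes taken from
        // the new test case added by this patch).
        TensorInfo in0(TensorShape(21U, 35U, 5U), 1, DataType::F32);
        TensorInfo in1(TensorShape(10U, 35U, 5U), 1, DataType::F32);
        TensorInfo out(TensorShape(31U, 35U, 5U), 1, DataType::F32);

        std::vector<ITensorInfo *> inputs{ &in0, &in1 };

        // Cloning the output info and marking it resizable lets validate()
        // accept an output whose shape may still be (re)inferred from the
        // inputs, which is what the updated tests now expect.
        const Status status = CLConcatenateLayer::validate(inputs,
                                                           &out.clone()->set_is_resizable(true),
                                                           DataLayoutDimension::WIDTH);

        // Status converts to true when validation succeeded.
        return bool(status) ? 0 : 1;
    }
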
diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp
index 493320b9ad..0ca6d72bff 100644
--- a/tests/validation/CL/WidthConcatenateLayer.cpp
+++ b/tests/validation/CL/WidthConcatenateLayer.cpp
@@ -47,19 +47,24 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output
TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching y dimension
TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching total width
- TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(21U, 35U, 5U), 1, DataType::F32)
+
}),
framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(10U, 35U, 5U), 1, DataType::F32)
})),
framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16),
TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(11U, 27U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(31U, 35U, 5U), 1, DataType::F32)
+
})),
- framework::dataset::make("Expected", { false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, true, true })),
input_info1, input_info2, output_info,expected)
{
std::vector<TensorInfo> inputs_vector_info;
@@ -72,7 +77,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw,&output_info.clone()->set_is_resizable(false),DataLayoutDimension::WIDTH ));
+ bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw,&output_info.clone()->set_is_resizable(true),DataLayoutDimension::WIDTH ));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
diff --git a/tests/validation/NEON/HeightConcatenateLayer.cpp b/tests/validation/NEON/HeightConcatenateLayer.cpp
index f5400f9246..0d08824645 100644
--- a/tests/validation/NEON/HeightConcatenateLayer.cpp
+++ b/tests/validation/NEON/HeightConcatenateLayer.cpp
@@ -48,19 +48,23 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 15U, 5U), 1, DataType::F32), // Mismatching data type input/output
TensorInfo(TensorShape(22U, 27U, 5U), 1, DataType::F32), // Mismatching y dimension
TensorInfo(TensorShape(11U, 25U, 5U), 1, DataType::F32), // Mismatching total height
- TensorInfo(TensorShape(16U, 25U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(16U, 25U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(35U, 21U, 5U), 1, DataType::F32)
+
}),
framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(23U, 15U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(22U, 127U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(11U, 26U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 25U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(16U, 25U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(35U, 10U, 5U), 1, DataType::F32)
})),
framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 30U, 5U), 1, DataType::F16),
TensorInfo(TensorShape(22U, 12U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(11U, 7U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 50U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(16U, 50U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(35U, 31U, 5U), 1, DataType::F32)
})),
- framework::dataset::make("Expected", { false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, true, true })),
input_info1, input_info2, output_info,expected)
{
std::vector<TensorInfo> inputs_vector_info;
@@ -73,7 +77,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), DataLayoutDimension::HEIGHT));
+ bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(true), DataLayoutDimension::HEIGHT));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
diff --git a/tests/validation/NEON/WidthConcatenateLayer.cpp b/tests/validation/NEON/WidthConcatenateLayer.cpp
index dba14ebb35..20df3f4d7d 100644
--- a/tests/validation/NEON/WidthConcatenateLayer.cpp
+++ b/tests/validation/NEON/WidthConcatenateLayer.cpp
@@ -47,19 +47,22 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output
TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching y dimension
TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching total width
- TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(21U, 35U, 5U), 1, DataType::F32)
}),
framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(10U, 35U, 5U), 1, DataType::F32)
})),
framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16),
TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(11U, 27U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32)
+ TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(31U, 35U, 5U), 1, DataType::F32)
})),
- framework::dataset::make("Expected", { false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, true, true })),
input_info1, input_info2, output_info,expected)
{
std::vector<TensorInfo> inputs_vector_info;
@@ -73,7 +76,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
}
bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw,
- &output_info.clone()->set_is_resizable(false),DataLayoutDimension::WIDTH));
+ &output_info.clone()->set_is_resizable(true),DataLayoutDimension::WIDTH));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on