From 9e4824c909b14dbaf7106e9527b0ffa22ef09bdc Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Fri, 12 Apr 2019 13:15:58 +0100 Subject: COMPMID-2111: ConcatenateLayer API should accept an index instead of an enum Alters the concatenate layer to be layout agnostic and accept an index as the concatenation axis instead of a typed layout-dependent enumeration. Change-Id: I0eaaf919f66a1ba1b09bbfb47c171fc1d4045530 Signed-off-by: Georgios Pinitas Reviewed-on: https://review.mlplatform.org/c/994 Comments-Addressed: Arm Jenkins Reviewed-by: Michele Di Giorgio Tested-by: Arm Jenkins --- tests/validation/CL/DepthConcatenateLayer.cpp | 4 ++-- tests/validation/CL/WidthConcatenateLayer.cpp | 4 ++-- tests/validation/NEON/DepthConcatenateLayer.cpp | 16 ++++++++-------- tests/validation/NEON/HeightConcatenateLayer.cpp | 2 +- tests/validation/NEON/WidthConcatenateLayer.cpp | 3 +-- tests/validation/fixtures/ConcatenateLayerFixture.h | 16 +--------------- 6 files changed, 15 insertions(+), 30 deletions(-) (limited to 'tests') diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp index f4a693ca7d..5da8a34351 100644 --- a/tests/validation/CL/DepthConcatenateLayer.cpp +++ b/tests/validation/CL/DepthConcatenateLayer.cpp @@ -73,7 +73,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( inputs_vector_info_raw.emplace_back(&input); } - bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), DataLayoutDimension::CHANNEL)); + bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 2)); ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); } // clang-format on @@ -95,7 +95,7 @@ TEST_CASE(Configuration, framework::DatasetMode::ALL) // Create and configure function CLConcatenateLayer concat_layer; - concat_layer.configure({ &src1, &src2, &src3 }, &dst, 
DataLayoutDimension::CHANNEL); + concat_layer.configure({ &src1, &src2, &src3 }, &dst, 2); } template using CLDepthConcatenateLayerFixture = ConcatenateLayerValidationFixture; diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp index 0ca6d72bff..2c1eb7fada 100644 --- a/tests/validation/CL/WidthConcatenateLayer.cpp +++ b/tests/validation/CL/WidthConcatenateLayer.cpp @@ -77,7 +77,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( inputs_vector_info_raw.emplace_back(&input); } - bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw,&output_info.clone()->set_is_resizable(true),DataLayoutDimension::WIDTH )); + bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw,&output_info.clone()->set_is_resizable(true), 0)); ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); } // clang-format on @@ -99,7 +99,7 @@ TEST_CASE(Configuration, framework::DatasetMode::ALL) // Create and configure function CLConcatenateLayer concat_layer; - concat_layer.configure({ &src1, &src2, &src3 }, &dst, DataLayoutDimension::WIDTH); + concat_layer.configure({ &src1, &src2, &src3 }, &dst, 0); } template diff --git a/tests/validation/NEON/DepthConcatenateLayer.cpp b/tests/validation/NEON/DepthConcatenateLayer.cpp index 1b355ae17d..0ddb220d34 100644 --- a/tests/validation/NEON/DepthConcatenateLayer.cpp +++ b/tests/validation/NEON/DepthConcatenateLayer.cpp @@ -55,13 +55,13 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(23U, 27U, 4U), 1, DataType::F32), TensorInfo(TensorShape(16U, 27U, 6U), 1, DataType::F32) })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 9U), 1, DataType::F16), - TensorInfo(TensorShape(25U, 12U, 9U), 1, DataType::F32), - TensorInfo(TensorShape(23U, 27U, 8U), 1, DataType::F32), - TensorInfo(TensorShape(16U, 27U, 12U), 1, DataType::F32) - })), - 
framework::dataset::make("Expected", { false, false, false, true })), - input_info1, input_info2, output_info,expected) + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 9U), 1, DataType::F16), + TensorInfo(TensorShape(25U, 12U, 9U), 1, DataType::F32), + TensorInfo(TensorShape(23U, 27U, 8U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 27U, 12U), 1, DataType::F32) + })), + framework::dataset::make("Expected", { false, false, false, true })), + input_info1, input_info2, output_info,expected) { std::vector inputs_vector_info; inputs_vector_info.emplace_back(std::move(input_info1)); @@ -73,7 +73,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( inputs_vector_info_raw.emplace_back(&input); } - bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), DataLayoutDimension::CHANNEL)); + bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 2)); ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); } // clang-format on diff --git a/tests/validation/NEON/HeightConcatenateLayer.cpp b/tests/validation/NEON/HeightConcatenateLayer.cpp index 0d08824645..9c23fb9bd3 100644 --- a/tests/validation/NEON/HeightConcatenateLayer.cpp +++ b/tests/validation/NEON/HeightConcatenateLayer.cpp @@ -77,7 +77,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( inputs_vector_info_raw.emplace_back(&input); } - bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(true), DataLayoutDimension::HEIGHT)); + bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(true), 1)); ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); } // clang-format on diff --git a/tests/validation/NEON/WidthConcatenateLayer.cpp b/tests/validation/NEON/WidthConcatenateLayer.cpp 
index 20df3f4d7d..ed840ef325 100644 --- a/tests/validation/NEON/WidthConcatenateLayer.cpp +++ b/tests/validation/NEON/WidthConcatenateLayer.cpp @@ -75,8 +75,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( inputs_vector_info_raw.emplace_back(&input); } - bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, - &output_info.clone()->set_is_resizable(true),DataLayoutDimension::WIDTH)); + bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(true), 0)); ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); } // clang-format on diff --git a/tests/validation/fixtures/ConcatenateLayerFixture.h b/tests/validation/fixtures/ConcatenateLayerFixture.h index db09957c09..39d4f9f95d 100644 --- a/tests/validation/fixtures/ConcatenateLayerFixture.h +++ b/tests/validation/fixtures/ConcatenateLayerFixture.h @@ -112,21 +112,7 @@ protected: // Create and configure function FunctionType concat; - switch(axis) - { - case 0: - concat.configure(src_ptrs, &dst, DataLayoutDimension::WIDTH); - break; - case 1: - concat.configure(src_ptrs, &dst, DataLayoutDimension::HEIGHT); - break; - case 2: - concat.configure(src_ptrs, &dst, DataLayoutDimension::CHANNEL); - break; - default: - ARM_COMPUTE_ERROR("Not supported"); - break; - } + concat.configure(src_ptrs, &dst, axis); for(auto &src : srcs) { -- cgit v1.2.1