From 11fedda86532cf632b9a3ae4b0f57e85f2a7c4f4 Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Wed, 15 Jan 2020 14:44:04 +0000
Subject: COMPMID-2985 add data_layout to PoolingLayerInfo

- use data layout from PoolingLayerInfo if it's available
- deprecate constructors without data_layout
- (3RDPARTY_UPDATE) modify examples and test suites to give data layout

Change-Id: Ie9ae8cc4837c339ff69a16a816110be704863c2d
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2603
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 examples/graph_inception_resnet_v1.cpp | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/examples/graph_inception_resnet_v1.cpp b/examples/graph_inception_resnet_v1.cpp
index 89e44edc33..599aa5c8ac 100644
--- a/examples/graph_inception_resnet_v1.cpp
+++ b/examples/graph_inception_resnet_v1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -97,8 +97,9 @@ public:
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>(0.f, 1.f);
 
         // Create input descriptor
-        const TensorShape tensor_shape     = permute_shape(TensorShape(image_width, image_height, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
-        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+        const auto        operation_layout = common_params.data_layout;
+        const TensorShape tensor_shape     = permute_shape(TensorShape(image_width, image_height, 3U, 1U), DataLayout::NCHW, operation_layout);
+        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
 
         // Set weights trained layout
         const DataLayout weights_layout = DataLayout::NCHW;
@@ -146,7 +147,7 @@ public:
               .set_name("Conv2d_2b_3x3/BatchNorm")
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu")
               // MaxPool_3a_3x3
-              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true)).set_name("MaxPool_3a_3x3/MaxPool")
+              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true)).set_name("MaxPool_3a_3x3/MaxPool")
               // Conv2d_3b_1x1
               << ConvolutionLayer(1U, 1U, 80U,
                                   get_weights_accessor(data_path, "Conv2d_3b_1x1_weights.npy", weights_layout),
@@ -201,7 +202,7 @@ public:
         block8_repeat(data_path, weights_layout, 1, 1.f, false);
 
         // Logits tail
-        graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("Logits/AvgPool_1a_8x8")
+        graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, operation_layout)).set_name("Logits/AvgPool_1a_8x8")
               << FlattenLayer().set_name("Logits/Flatten")
               << FullyConnectedLayer(
                   128U,
@@ -592,7 +593,7 @@ private:
 
         // Branch 2
         SubStream i_c(graph);
-        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0), true)).set_name("Mixed_6a/Branch_2/MaxPool_1a_3x3");
+        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, common_params.data_layout, PadStrideInfo(2, 2, 0, 0), true)).set_name("Mixed_6a/Branch_2/MaxPool_1a_3x3");
 
         // Concatenate
         graph << ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c)).set_name("Mixed_6a/concat");
@@ -695,7 +696,7 @@ private:
 
         // Branch 3
         SubStream i_d(graph);
-        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0), true)).set_name("Mixed_7a/Branch_3/MaxPool_1a_3x3");
+        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, common_params.data_layout, PadStrideInfo(2, 2, 0, 0), true)).set_name("Mixed_7a/Branch_3/MaxPool_1a_3x3");
 
         // Concatenate
         graph << ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d)).set_name("Mixed_7a/concat");
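
Note: for anyone updating their own graphs against this change, the migration is mechanical. The operation's data layout becomes an explicit PoolingLayerInfo constructor argument, placed after the pooling type (and after the pool size, when one is given). Below is a minimal sketch of the before/after pattern, grounded in the constructor signatures this patch exercises; the helper function add_pooling_layers and the Stream plumbing around it are illustrative, not part of the patch.

    #include "arm_compute/graph.h"

    using namespace arm_compute;
    using namespace arm_compute::graph::frontend;

    // Illustrative helper: contrasts the deprecated layout-less construction
    // with the new form that passes the operation's data layout explicitly.
    void add_pooling_layers(Stream &graph_stream, DataLayout operation_layout)
    {
        // Deprecated by this change (no data layout argument):
        //   PoolingLayerInfo(PoolingType::MAX, 3,
        //                    PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true)

        // New form: the data layout follows the pooling type and pool size
        graph_stream << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout,
                                                      PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
                        .set_name("MaxPool_3a_3x3/MaxPool")
                     // Global-pooling overload: only the type and the layout are needed
                     << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, operation_layout))
                        .set_name("Logits/AvgPool_1a_8x8");
    }

In graph examples that already read the layout from the command line, common_params.data_layout (as this patch does via the local operation_layout alias) is the natural value to pass.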