| author | Sang-Hoon Park <sang-hoon.park@arm.com> | 2020-01-15 14:44:04 +0000 |
|---|---|---|
| committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2020-01-28 16:05:27 +0000 |
| commit | 11fedda86532cf632b9a3ae4b0f57e85f2a7c4f4 (patch) | |
| tree | 6fd8003a38fe9baa262696754bdd5cb1d1595947 /examples/graph_inception_v4.cpp | |
| parent | 6c89ffac750010cb9335794defe8a366c04db937 (diff) | |
| download | ComputeLibrary-11fedda86532cf632b9a3ae4b0f57e85f2a7c4f4.tar.gz | |
COMPMID-2985 add data_layout to PoolingLayerInfo
- use data layout from PoolingLayerInfo if it's available
- deprecate constructors without data_layout
- (3RDPARTY_UPDATE) modify examples and test suites to pass the data layout
Change-Id: Ie9ae8cc4837c339ff69a16a816110be704863c2d
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2603
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'examples/graph_inception_v4.cpp')
-rw-r--r-- | examples/graph_inception_v4.cpp | 37 |
1 file changed, 26 insertions, 11 deletions
```diff
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index a322b2268d..7893930993 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -65,8 +65,9 @@ public:
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
 
         // Create input descriptor
-        const TensorShape tensor_shape     = permute_shape(TensorShape(299U, 299U, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
-        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+        const auto        operation_layout = common_params.data_layout;
+        const TensorShape tensor_shape     = permute_shape(TensorShape(299U, 299U, 3U, 1U), DataLayout::NCHW, operation_layout);
+        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
 
         // Set weights trained layout
         const DataLayout weights_layout = DataLayout::NCHW;
@@ -135,7 +136,7 @@ public:
         graph << get_inceptionC_block(data_path, weights_layout, "Mixed_7b").set_name("Mixed_7b/concat");
         graph << get_inceptionC_block(data_path, weights_layout, "Mixed_7c").set_name("Mixed_7c/concat");
         graph << get_inceptionC_block(data_path, weights_layout, "Mixed_7d").set_name("Mixed_7d/concat");
-        graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("Logits/AvgPool_1a/AvgPool")
+        graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, operation_layout)).set_name("Logits/AvgPool_1a/AvgPool")
               << FlattenLayer().set_name("Logits/Flatten")
               << FullyConnectedLayer(
                   1001U,
@@ -188,7 +189,9 @@ private:
         std::string total_path = "/cnn_data/inceptionv4_model/Mixed_3a_";
 
         SubStream i_a(graph);
-        i_a << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true)).set_name("Mixed_3a/Branch_0/MaxPool_0a_3x3/MaxPool");
+        i_a << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, common_params.data_layout, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL),
+                                             true))
+            .set_name("Mixed_3a/Branch_0/MaxPool_0a_3x3/MaxPool");
 
         SubStream i_b(graph);
         i_b << ConvolutionLayer(3U, 3U, 96U,
@@ -301,7 +304,9 @@ private:
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5a/Branch_0/Conv2d_1a_3x3/Relu");
 
         SubStream i_b(graph);
-        i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true)).set_name("Mixed_5a/Branch_1/MaxPool_1a_3x3/MaxPool");
+        i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, common_params.data_layout, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL),
+                                             true))
+            .set_name("Mixed_5a/Branch_1/MaxPool_1a_3x3/MaxPool");
 
         return ConcatLayer(std::move(i_a), std::move(i_b));
     }
@@ -383,7 +388,9 @@ private:
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0c_3x3/Relu");
 
         SubStream i_d(graph);
-        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true)).set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool")
+        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, common_params.data_layout, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL),
+                                             true))
+            .set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool")
             << ConvolutionLayer(1U, 1U, 96U,
                                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy", weights_layout),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -452,7 +459,9 @@ private:
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_6a/Branch_1/Conv2d_1a_3x3/Relu");
 
         SubStream i_c(graph);
-        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true)).set_name("Mixed_6a/Branch_2/MaxPool_1a_3x3/MaxPool");
+        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, common_params.data_layout, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL),
+                                             true))
+            .set_name("Mixed_6a/Branch_2/MaxPool_1a_3x3/MaxPool");
 
         return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c));
     }
@@ -567,7 +576,9 @@ private:
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0e_1x7/Relu");
 
         SubStream i_d(graph);
-        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true)).set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool")
+        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, common_params.data_layout, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL),
+                                             true))
+            .set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool")
             << ConvolutionLayer(1U, 1U, 128U,
                                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy", weights_layout),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -658,7 +669,9 @@ private:
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_7a/Branch_1/Conv2d_1a_3x3/Relu");
 
         SubStream i_c(graph);
-        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true)).set_name("Mixed_7a/Branch_2/MaxPool_1a_3x3/MaxPool");
+        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, common_params.data_layout, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL),
+                                             true))
+            .set_name("Mixed_7a/Branch_2/MaxPool_1a_3x3/MaxPool");
 
         return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c));
     }
@@ -811,7 +824,9 @@ private:
         i_c << ConcatLayer(std::move(i_c1), std::move(i_c2)).set_name(param_path + "/Branch_2/concat");
 
         SubStream i_d(graph);
-        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true)).set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool")
+        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, common_params.data_layout, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL),
+                                             true))
+            .set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool")
             << ConvolutionLayer(1U, 1U, 256U,
                                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy", weights_layout),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
```
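Every hunk above applies the same pattern: each pooling layer moves from the layout-free `PoolingLayerInfo` constructors, which this change deprecates, to the overloads that take a `DataLayout`. The sketch below is a minimal illustration of that before/after call, assuming only the constructor forms visible in this diff; the `layout` variable is a stand-in for `common_params.data_layout`, which the graph examples read from their command-line options.

```cpp
#include "arm_compute/core/Types.h"

using namespace arm_compute;

int main()
{
    // Stand-in for common_params.data_layout from the graph example framework.
    const DataLayout layout = DataLayout::NHWC;

    // Deprecated by this change: 3x3 max pooling, stride 2, CEIL rounding,
    // exclude_padding = true, but no data layout.
    PoolingLayerInfo old_info(PoolingType::MAX, 3,
                              PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true);

    // New overload: the data layout of the operation is passed explicitly
    // after the pool size.
    PoolingLayerInfo new_info(PoolingType::MAX, 3, layout,
                              PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true);

    // Global-pooling overload, as used for Logits/AvgPool_1a/AvgPool above.
    PoolingLayerInfo global_info(PoolingType::AVG, layout);

    (void)old_info;
    (void)new_info;
    (void)global_info;
    return 0;
}
```

In the example itself this is simply the extra `operation_layout` / `common_params.data_layout` argument threaded through each `PoolingLayer`, as shown in the hunks above.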