Diffstat (limited to 'examples/graph_alexnet.cpp')
-rw-r--r-- | examples/graph_alexnet.cpp | 157
1 file changed, 75 insertions, 82 deletions
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 40bbee1d68..be0b8a7d8a 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -39,8 +39,7 @@ using namespace arm_compute::graph_utils;
 class GraphAlexnetExample : public Example
 {
 public:
-    GraphAlexnetExample()
-        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "AlexNet")
+    GraphAlexnetExample() : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "AlexNet")
     {
     }
     bool do_setup(int argc, char **argv) override
@@ -53,14 +52,15 @@ public:
         common_params = consume_common_graph_parameters(common_opts);
 
         // Return when help menu is requested
-        if(common_params.help)
+        if (common_params.help)
         {
             cmd_parser.print_help(argv[0]);
             return false;
         }
 
         // Checks
-        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");
+        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type),
+                                "QASYMM8 not supported for this graph");
 
         // Print parameter values
         std::cout << common_params << std::endl;
@@ -69,88 +69,80 @@ public:
         std::string data_path = common_params.data_path;
 
         // Create a preprocessor object
-        const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
-        std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+        const std::array<float, 3> mean_rgb{{122.68f, 116.67f, 104.01f}};
+        std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
 
         // Create input descriptor
         const auto operation_layout = common_params.data_layout;
-        const TensorShape tensor_shape = permute_shape(TensorShape(227U, 227U, 3U, 1U), DataLayout::NCHW, operation_layout);
-        TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
+        const TensorShape tensor_shape =
+            permute_shape(TensorShape(227U, 227U, 3U, common_params.batches), DataLayout::NCHW, operation_layout);
+        TensorDescriptor input_descriptor =
+            TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
 
         // Set weights trained layout
         const DataLayout weights_layout = DataLayout::NCHW;
 
-        graph << common_params.target
-              << common_params.fast_math_hint
-              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor)))
-              // Layer 1
-              << ConvolutionLayer(
-                  11U, 11U, 96U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv1_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv1_b.npy"),
-                  PadStrideInfo(4, 4, 0, 0))
-              .set_name("conv1")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu1")
-              << NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f)).set_name("norm1")
-              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0))).set_name("pool1")
-              // Layer 2
-              << ConvolutionLayer(
-                  5U, 5U, 256U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv2_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv2_b.npy"),
-                  PadStrideInfo(1, 1, 2, 2), 2)
-              .set_name("conv2")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu2")
-              << NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f)).set_name("norm2")
-              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0))).set_name("pool2")
-              // Layer 3
-              << ConvolutionLayer(
-                  3U, 3U, 384U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv3_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv3_b.npy"),
-                  PadStrideInfo(1, 1, 1, 1))
-              .set_name("conv3")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu3")
-              // Layer 4
-              << ConvolutionLayer(
-                  3U, 3U, 384U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv4_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv4_b.npy"),
-                  PadStrideInfo(1, 1, 1, 1), 2)
-              .set_name("conv4")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu4")
-              // Layer 5
-              << ConvolutionLayer(
-                  3U, 3U, 256U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv5_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv5_b.npy"),
-                  PadStrideInfo(1, 1, 1, 1), 2)
-              .set_name("conv5")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu5")
-              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0))).set_name("pool5")
-              // Layer 6
-              << FullyConnectedLayer(
-                  4096U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc6_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc6_b.npy"))
-              .set_name("fc6")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu6")
-              // Layer 7
-              << FullyConnectedLayer(
-                  4096U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc7_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc7_b.npy"))
-              .set_name("fc7")
-              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu7")
-              // Layer 8
-              << FullyConnectedLayer(
-                  1000U,
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc8_w.npy", weights_layout),
-                  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc8_b.npy"))
-              .set_name("fc8")
-              // Softmax
-              << SoftmaxLayer().set_name("prob")
-              << OutputLayer(get_output_accessor(common_params, 5));
+        graph
+            << common_params.target << common_params.fast_math_hint
+            << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor)))
+            // Layer 1
+            << ConvolutionLayer(11U, 11U, 96U,
+                                get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv1_w.npy", weights_layout),
+                                get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv1_b.npy"),
+                                PadStrideInfo(4, 4, 0, 0))
+                   .set_name("conv1")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu1")
+            << NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f)).set_name("norm1")
+            << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0)))
+                   .set_name("pool1")
+            // Layer 2
+            << ConvolutionLayer(
+                   5U, 5U, 256U, get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv2_w.npy", weights_layout),
+                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv2_b.npy"), PadStrideInfo(1, 1, 2, 2), 2)
+                   .set_name("conv2")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu2")
+            << NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f)).set_name("norm2")
+            << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0)))
+                   .set_name("pool2")
+            // Layer 3
+            << ConvolutionLayer(
+                   3U, 3U, 384U, get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv3_w.npy", weights_layout),
+                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv3_b.npy"), PadStrideInfo(1, 1, 1, 1))
+                   .set_name("conv3")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu3")
+            // Layer 4
+            << ConvolutionLayer(
+                   3U, 3U, 384U, get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv4_w.npy", weights_layout),
+                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv4_b.npy"), PadStrideInfo(1, 1, 1, 1), 2)
+                   .set_name("conv4")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu4")
+            // Layer 5
+            << ConvolutionLayer(
+                   3U, 3U, 256U, get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv5_w.npy", weights_layout),
+                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv5_b.npy"), PadStrideInfo(1, 1, 1, 1), 2)
+                   .set_name("conv5")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu5")
+            << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0)))
+                   .set_name("pool5")
+            // Layer 6
+            << FullyConnectedLayer(4096U,
+                                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc6_w.npy", weights_layout),
+                                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc6_b.npy"))
+                   .set_name("fc6")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu6")
+            // Layer 7
+            << FullyConnectedLayer(4096U,
+                                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc7_w.npy", weights_layout),
+                                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc7_b.npy"))
+                   .set_name("fc7")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("relu7")
+            // Layer 8
+            << FullyConnectedLayer(1000U,
+                                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc8_w.npy", weights_layout),
+                                   get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc8_b.npy"))
+                   .set_name("fc8")
+            // Softmax
+            << SoftmaxLayer().set_name("prob") << OutputLayer(get_output_accessor(common_params, 5));
 
         // Finalize graph
         GraphConfig config;
@@ -159,10 +151,11 @@ public:
         config.use_tuner  = common_params.enable_tuner;
         config.tuner_mode = common_params.tuner_mode;
         config.tuner_file = common_params.tuner_file;
+        config.mlgo_file  = common_params.mlgo_file;
 
         // Load the precompiled kernels from a file into the kernel library, in this way the next time they are needed
         // compilation won't be required.
-        if(common_params.enable_cl_cache)
+        if (common_params.enable_cl_cache)
        {
 #ifdef ARM_COMPUTE_CL
             restore_program_cache_from_file();
@@ -172,7 +165,7 @@ public:
         graph.finalize(common_params.target, config);
 
         // Save the opencl kernels to a file
-        if(common_opts.enable_cl_cache)
+        if (common_opts.enable_cl_cache)
         {
 #ifdef ARM_COMPUTE_CL
             save_program_cache_to_file();