From e472082f831815c217677e3f1802ecaae1348e65 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Mon, 2 Oct 2017 17:44:52 +0100
Subject: COMPMID-549 Create a Logger for GraphAPI

Change-Id: If912d8232e12cd496923d55d386898450dac09e2
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/89897
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 src/graph/Graph.cpp                     | 22 ------------
 src/graph/nodes/ActivationLayer.cpp     | 33 ++++++------------
 src/graph/nodes/ConvolutionLayer.cpp    | 60 ++++++++++++++------------------
 src/graph/nodes/FullyConnectedLayer.cpp | 30 ++++++-----------
 src/graph/nodes/NormalizationLayer.cpp  | 29 +++++-----------
 src/graph/nodes/PoolingLayer.cpp        | 27 +++++----------
 src/graph/nodes/SoftmaxLayer.cpp        | 26 +++++---------
 7 files changed, 71 insertions(+), 156 deletions(-)

(limited to 'src/graph')

diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 25c4577df7..7dddb1cd9a 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -48,12 +48,6 @@ public:
      */
     void configure(GraphHints _next_hints);
 
-    /** Sets whether to enable information print out
-     *
-     * @param[in] is_enabled Set to true if need info printed out
-     */
-    void set_info_enablement(bool is_enabled);
-
     GraphContext                         _ctx{};
     std::vector<Stage>                   _pipeline{};
     std::vector<std::unique_ptr<Tensor>> _tensors{};
@@ -64,7 +58,6 @@ public:
     std::unique_ptr<Tensor> _graph_output{ nullptr };
     std::unique_ptr<INode>  _current_node{ nullptr };
     Tensor                 *_current_output{ nullptr };
-    bool                    _info_enabled{ false };
 
 private:
     Tensor *_current_input{ nullptr };
@@ -161,11 +154,6 @@ void Graph::Private::configure(GraphHints _next_hints)
     std::swap(_current_hints, _next_hints);
 }
 
-void Graph::Private::set_info_enablement(bool is_enabled)
-{
-    _info_enabled = is_enabled;
-}
-
 void Graph::add_node(std::unique_ptr<INode> node)
 {
     ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_input == nullptr, "The graph's input must be set before the first node is added");
@@ -179,11 +167,6 @@ void Graph::add_node(std::unique_ptr<INode> node)
     {
         //Finalize the previous Node:
         _pimpl->configure(_pimpl->_next_hints);
-
-        if(_pimpl->_info_enabled)
-        {
-            _pimpl->_current_node->print_info();
-        }
     }
     else
     {
@@ -231,11 +214,6 @@ void Graph::set_temp(TensorInfo &&tmp)
     _pimpl->_current_output = _pimpl->_tensors.back().get();
 }
 
-void Graph::set_info_enablement(bool is_enabled)
-{
-    _pimpl->set_info_enablement(is_enabled);
-}
-
 GraphHints &Graph::hints()
 {
     return _pimpl->_next_hints;
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
index da2dac04e2..5cd2a0bcc2 100644
--- a/src/graph/nodes/ActivationLayer.cpp
+++ b/src/graph/nodes/ActivationLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/graph/nodes/ActivationLayer.h"
 
+#include "arm_compute/core/Logger.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
@@ -71,36 +72,24 @@ std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphC
 {
     std::unique_ptr<arm_compute::IFunction> func;
     _target_hint = ctx.hints().target_hint();
-    _input       = input;
-    _output      = output;
 
     if(_target_hint == TargetHint::OPENCL)
     {
         func = instantiate<TargetHint::OPENCL>(input, output, _activation_info);
+        ARM_COMPUTE_LOG("Instantiating CLActivationLayer");
     }
     else
     {
         func = instantiate<TargetHint::NEON>(input, output, _activation_info);
+        ARM_COMPUTE_LOG("Instantiating NEActivationLayer");
     }
 
-    return func;
-}
-
-void ActivationLayer::print_info()
-{
-    if(_target_hint == TargetHint::OPENCL)
-    {
-        std::cout << "Instantiating CLActivationLayer";
-    }
-    else
-    {
-        std::cout << "Instantiating NEActivationLayer";
-    }
-
-    std::cout << " Data Type: " << _input->info()->data_type()
-              << " Input shape: " << _input->info()->tensor_shape()
-              << " Output shape: " << _output->info()->tensor_shape()
-              << " Activation function: " << _activation_info.activation()
-              << " a: " << _activation_info.a()
-              << " b: " << _activation_info.b()
-              << std::endl;
+    ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+                    << " Input shape: " << input->info()->tensor_shape()
+                    << " Output shape: " << output->info()->tensor_shape()
+                    << " Activation function: " << _activation_info.activation()
+                    << " a: " << _activation_info.a()
+                    << " b: " << _activation_info.b()
+                    << std::endl);
+
+    return func;
 }
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index a992095786..b47be8dc33 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/graph/nodes/ConvolutionLayer.h"
 
+#include "arm_compute/core/Logger.h"
 #include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
 #include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
 #include "arm_compute/runtime/IFunction.h"
@@ -184,8 +185,6 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Graph
     std::unique_ptr<arm_compute::IFunction> func;
     _target_hint = ctx.hints().target_hint();
-    _input       = input;
-    _output      = output;
 
     const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
 
     // Check if the weights and biases are loaded
@@ -197,19 +196,21 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Graph
     _biases.set_target(_target_hint);
 
     // Calculate output shape
-    TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
+    TensorShape output_shape = calculate_convolution_layer_output_shape(input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
 
     // Output auto inizialitation if not yet initialized
-    arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());
+    arm_compute::auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
 
     // Create appropriate convolution function
     if(_num_groups == 1)
     {
-        func = instantiate_convolution(conv_method_hint);
+        func = instantiate_convolution(input, output, conv_method_hint);
+        ARM_COMPUTE_LOG("Instantiating CLConvolutionLayer");
     }
     else
     {
-        func = instantiate_grouped_convolution(conv_method_hint);
+        func = instantiate_grouped_convolution(input, output, conv_method_hint);
+        ARM_COMPUTE_LOG("Instantiating NEConvolutionLayer");
     }
 
     // Fill weights
@@ -223,49 +224,38 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Graph
         _biases.allocate_and_fill_if_needed();
     }
 
-    return func;
-}
+    ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+                    << " Input Shape: " << input->info()->tensor_shape()
+                    << " Weights shape: " << _weights.info().tensor_shape()
+                    << " Biases Shape: " << _biases.info().tensor_shape()
+                    << " Output Shape: " << output->info()->tensor_shape()
+                    << " PadStrideInfo: " << _conv_info
+                    << " Groups: " << _num_groups
+                    << " WeightsInfo: " << _weights_info
+                    << std::endl);
 
-void ConvolutionLayer::print_info()
-{
-    if(_target_hint == TargetHint::OPENCL)
-    {
-        std::cout << "Instantiating CLConvolutionLayer";
-    }
-    else
-    {
-        std::cout << "Instantiating NEConvolutionLayer";
-    }
-    std::cout << " Data Type: " << _input->info()->data_type()
-              << " Input Shape: " << _input->info()->tensor_shape()
-              << " Weights shape: " << _weights.info().tensor_shape()
-              << " Biases Shape: " << _biases.info().tensor_shape()
-              << " Output Shape: " << _output->info()->tensor_shape()
-              << " PadStrideInfo: " << _conv_info
-              << " Groups: " << _num_groups
-              << " WeightsInfo: " << _weights_info
-              << std::endl;
+    return func;
 }
 
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
 {
     std::unique_ptr<arm_compute::IFunction> func;
     if(_target_hint == TargetHint::OPENCL)
     {
-        func = instantiate<TargetHint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+        func = instantiate<TargetHint::OPENCL>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
     }
     else
     {
-        func = instantiate<TargetHint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+        func = instantiate<TargetHint::NEON>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
     }
     return func;
 }
 
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ConvolutionMethodHint conv_method_hint)
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
 {
     // Get tensor shapes
-    TensorShape input_shape   = _input->info()->tensor_shape();
-    TensorShape output_shape  = _output->info()->tensor_shape();
+    TensorShape input_shape   = input->info()->tensor_shape();
+    TensorShape output_shape  = output->info()->tensor_shape();
     TensorShape weights_shape = _weights.info().tensor_shape();
     TensorShape biases_shape  = _biases.info().tensor_shape();
@@ -309,8 +299,8 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_co
 
         // Create sub-tensors for input, output, weights and bias
         auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
-        _is[i]           = SubTensor(_input, input_shape, input_coord, hint_to_use);
-        _os[i]           = SubTensor(_output, output_shape, output_coord, hint_to_use);
+        _is[i]           = SubTensor(input, input_shape, input_coord, hint_to_use);
+        _os[i]           = SubTensor(output, output_shape, output_coord, hint_to_use);
         _ws[i]           = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
         _bs[i]           = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
 
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index c317660b20..6b21810a36 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -24,6 +24,7 @@
 #include "arm_compute/graph/nodes/FullyConnectedLayer.h"
 
 #include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Logger.h"
 #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
 #include "support/ToolchainSupport.h"
@@ -112,35 +113,24 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Gr
     std::unique_ptr<arm_compute::IFunction> func;
     _target_hint = ctx.hints().target_hint();
-    _input       = input;
-    _output      = output;
 
     if(_target_hint == TargetHint::OPENCL)
     {
         func = instantiate<TargetHint::OPENCL>(input, _weights, _biases, output);
+        ARM_COMPUTE_LOG("Instantiating CLFullyConnectedLayer");
     }
     else
     {
         func = instantiate<TargetHint::NEON>(input, _weights, _biases, output);
+        ARM_COMPUTE_LOG("Instantiating NEFullyConnectedLayer");
     }
 
-    return func;
-}
+    ARM_COMPUTE_LOG(" Type: " << input->info()->data_type()
+                    << " Input Shape: " << input->info()->tensor_shape()
+                    << " Weights shape: " << _weights.info().tensor_shape()
+                    << " Biases Shape: " << _biases.info().tensor_shape()
+                    << " Output Shape: " << output->info()->tensor_shape()
+                    << std::endl);
 
-void FullyConnectedLayer::print_info()
-{
-    if(_target_hint == TargetHint::OPENCL)
-    {
-        std::cout << "Instantiating CLFullyConnectedLayer";
-    }
-    else
-    {
-        std::cout << "Instantiating NEFullyConnectedLayer";
-    }
-    std::cout << " Type: " << _input->info()->data_type()
-              << " Input Shape: " << _input->info()->tensor_shape()
-              << " Weights shape: " << _weights.info().tensor_shape()
-              << " Biases Shape: " << _biases.info().tensor_shape()
-              << " Output Shape: " << _output->info()->tensor_shape()
-              << std::endl;
+    return func;
 }
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
index 99d07dc8da..47f0891dfb 100644
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ b/src/graph/nodes/NormalizationLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/graph/nodes/NormalizationLayer.h"
 
+#include "arm_compute/core/Logger.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"
 #include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
@@ -71,35 +72,23 @@ std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Gra
 {
     std::unique_ptr<arm_compute::IFunction> func;
     _target_hint = ctx.hints().target_hint();
-    _input       = input;
-    _output      = output;
 
     if(_target_hint == TargetHint::OPENCL)
     {
         func = instantiate<TargetHint::OPENCL>(input, output, _norm_info);
+        ARM_COMPUTE_LOG("Instantiating CLNormalizationLayer");
     }
     else
     {
         func = instantiate<TargetHint::NEON>(input, output, _norm_info);
+        ARM_COMPUTE_LOG("Instantiating NENormalizationLayer");
     }
 
-    return func;
-}
+    ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+                    << " Input shape: " << input->info()->tensor_shape()
+                    << " Output shape: " << output->info()->tensor_shape()
+                    << " Normalization info: " << _norm_info
+                    << std::endl);
 
-void NormalizationLayer::print_info()
-{
-    if(_target_hint == TargetHint::OPENCL)
-    {
-        std::cout << "Instantiating CLNormalizationLayer";
-    }
-    else
-    {
-        std::cout << "Instantiating NENormalizationLayer";
-    }
-
-    std::cout << " Data Type: " << _input->info()->data_type()
-              << " Input shape: " << _input->info()->tensor_shape()
-              << " Output shape: " << _output->info()->tensor_shape()
-              << " Normalization info: " << _norm_info
-              << std::endl;
+    return func;
 }
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
index 2a5e4cb3d8..317cf4d14f 100644
--- a/src/graph/nodes/PoolingLayer.cpp
+++ b/src/graph/nodes/PoolingLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/graph/nodes/PoolingLayer.h"
 
+#include "arm_compute/core/Logger.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
@@ -71,34 +72,22 @@ std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphCont
 {
     std::unique_ptr<arm_compute::IFunction> func;
     _target_hint = ctx.hints().target_hint();
-    _input       = input;
-    _output      = output;
 
     if(_target_hint == TargetHint::OPENCL)
     {
         func = instantiate<TargetHint::OPENCL>(input, output, _pool_info);
+        ARM_COMPUTE_LOG("Instantiating CLPoolingLayer");
     }
     else
     {
         func = instantiate<TargetHint::NEON>(input, output, _pool_info);
+        ARM_COMPUTE_LOG("Instantiating NEPoolingLayer");
     }
 
-    return func;
-}
+    ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+                    << " Input shape: " << input->info()->tensor_shape()
+                    << " Output shape: " << output->info()->tensor_shape()
+                    << " Pooling info: " << _pool_info << std::endl);
 
-void PoolingLayer::print_info()
-{
-    if(_target_hint == TargetHint::OPENCL)
-    {
-        std::cout << "Instantiating CLPoolingLayer";
-    }
-    else
-    {
-        std::cout << "Instantiating NEPoolingLayer";
-    }
-
-    std::cout << " Data Type: " << _input->info()->data_type()
-              << " Input shape: " << _input->info()->tensor_shape()
-              << " Output shape: " << _output->info()->tensor_shape()
-              << " Pooling info: " << _pool_info << std::endl;
+    return func;
 }
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
index 9e798ef7cc..86282448f7 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/graph/nodes/SoftmaxLayer.h"
 
+#include "arm_compute/core/Logger.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
 #include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
@@ -65,33 +66,22 @@ std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphCont
 {
     std::unique_ptr<arm_compute::IFunction> func;
     _target_hint = ctx.hints().target_hint();
-    _input       = input;
-    _output      = output;
 
     if(_target_hint == TargetHint::OPENCL)
    {
         func = instantiate<TargetHint::OPENCL>(input, output);
+        ARM_COMPUTE_LOG("Instantiating CLSoftmaxLayer");
     }
     else
     {
         func = instantiate<TargetHint::NEON>(input, output);
+        ARM_COMPUTE_LOG("Instantiating NESoftmaxLayer");
     }
 
-    return func;
-}
+    ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+                    << " Input shape: " << input->info()->tensor_shape()
+                    << " Output shape: " << output->info()->tensor_shape()
+                    << std::endl);
 
-void SoftmaxLayer::print_info()
-{
-    if(_target_hint == TargetHint::OPENCL)
-    {
-        std::cout << "Instantiating CLSoftmaxLayer";
-    }
-    else
-    {
-        std::cout << "Instantiating NESoftmaxLayer";
-    }
-    std::cout << " Data Type: " << _input->info()->data_type()
-              << " Input shape: " << _input->info()->tensor_shape()
-              << " Output shape: " << _output->info()->tensor_shape()
-              << std::endl;
+    return func;
 }
-- 
cgit v1.2.1
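
A note on the new logger: the arm_compute/core/Logger.h header that every node above now includes lives outside the 'src/graph' listing, so its contents are not part of this patch view. The sketch below is only a rough illustration of a stream-style macro that would be compatible with the call sites in the hunks above; the macro name and the "text << value << std::endl" usage pattern are taken from the diff, while the class, namespace and enable/disable switch are assumptions, not the library's actual implementation.

// Rough sketch, assuming a stringstream-backed singleton behind ARM_COMPUTE_LOG.
// Only the macro name and its call style come from the patch; everything else
// here (logger_sketch namespace, Logger class, set_enabled) is hypothetical.
#include <iostream>
#include <sstream>
#include <string>

namespace logger_sketch // assumed namespace, not the library's layout
{
class Logger
{
public:
    // Meyers singleton: one logger instance shared by every call site.
    static Logger &get()
    {
        static Logger instance;
        return instance;
    }

    // Print the already-formatted message if logging is enabled.
    void log(const std::string &msg)
    {
        if(_enabled)
        {
            std::cout << msg;
        }
    }

    void set_enabled(bool enabled)
    {
        _enabled = enabled;
    }

private:
    Logger() = default;
    bool _enabled{ true };
};
} // namespace logger_sketch

// The argument is spliced into a stringstream, so chained << expressions and
// manipulators such as std::endl work unchanged. The do/while(false) wrapper
// keeps the macro usable as a single statement inside the if/else branches
// seen in the graph nodes above.
#define ARM_COMPUTE_LOG(x)                           \
    do                                               \
    {                                                \
        std::stringstream stream;                    \
        stream << x;                                 \
        logger_sketch::Logger::get().log(stream.str()); \
    } while(false)

int main()
{
    const int num_groups = 4;
    ARM_COMPUTE_LOG("Instantiating NEConvolutionLayer");
    ARM_COMPUTE_LOG(" Groups: " << num_groups << std::endl);
    return 0;
}

If the real logger writes somewhere other than std::cout, only Logger::log would need to change; the macro and the call sites in the patch stay the same.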