author    Michalis Spyrou <michalis.spyrou@arm.com>  2017-10-02 17:44:52 +0100
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:35:24 +0000
commit    e472082f831815c217677e3f1802ecaae1348e65 (patch)
tree      896fc2631e0d1113f9bd33ffb730973e886f2717
parent    744b5edd1e7eedab8ac52a8cea33bf62fb95affc (diff)
download  ComputeLibrary-e472082f831815c217677e3f1802ecaae1348e65.tar.gz
COMPMID-549 Create a Logger for GraphAPI
Change-Id: If912d8232e12cd496923d55d386898450dac09e2
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/89897
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
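For orientation, here is a minimal sketch of how the new logging API introduced by this patch is meant to be driven, matching the pattern applied in the graph examples below. It assumes a build with -DARM_COMPUTE_DEBUG_ENABLED; the main() wrapper is illustrative only, not part of the patch.

#include "arm_compute/core/Logger.h"

int main()
{
    // Route INFO-level messages to stdout. With LoggerVerbosity::NONE the
    // logger hands back its internal null stream and output is discarded.
    arm_compute::Logger::get().set_logger(std::cout, arm_compute::LoggerVerbosity::INFO);

    // Expands to a stream insertion only when ARM_COMPUTE_DEBUG_ENABLED is
    // defined; otherwise the macro expands to nothing.
    ARM_COMPUTE_LOG("Graph logger configured" << std::endl);
    return 0;
}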
-rw-r--r--  arm_compute/core/Logger.h                      71
-rw-r--r--  arm_compute/graph/Graph.h                       6
-rw-r--r--  arm_compute/graph/INode.h                       4
-rw-r--r--  arm_compute/graph/nodes/ActivationLayer.h       1
-rw-r--r--  arm_compute/graph/nodes/ConvolutionLayer.h      9
-rw-r--r--  arm_compute/graph/nodes/FullyConnectedLayer.h   1
-rw-r--r--  arm_compute/graph/nodes/NormalizationLayer.h    1
-rw-r--r--  arm_compute/graph/nodes/PoolingLayer.h          1
-rw-r--r--  arm_compute/graph/nodes/SoftmaxLayer.h          2
-rw-r--r--  examples/graph_alexnet.cpp                      3
-rw-r--r--  examples/graph_lenet.cpp                        3
-rw-r--r--  src/core/Logger.cpp                            56
-rw-r--r--  src/graph/Graph.cpp                            22
-rw-r--r--  src/graph/nodes/ActivationLayer.cpp            33
-rw-r--r--  src/graph/nodes/ConvolutionLayer.cpp           60
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp        30
-rw-r--r--  src/graph/nodes/NormalizationLayer.cpp         29
-rw-r--r--  src/graph/nodes/PoolingLayer.cpp               27
-rw-r--r--  src/graph/nodes/SoftmaxLayer.cpp               26
-rw-r--r--  utils/GraphUtils.cpp                            2
-rw-r--r--  utils/GraphUtils.h                              2
21 files changed, 211 insertions(+), 178 deletions(-)
diff --git a/arm_compute/core/Logger.h b/arm_compute/core/Logger.h
new file mode 100644
index 0000000000..0848479d37
--- /dev/null
+++ b/arm_compute/core/Logger.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __ARM_COMPUTE_LOGGER_H__
+#define __ARM_COMPUTE_LOGGER_H__
+
+#include <iostream>
+#include <memory>
+
+#ifdef ARM_COMPUTE_DEBUG_ENABLED
+#define ARM_COMPUTE_LOG(x) (arm_compute::Logger::get().log_info() << x)
+#else /* ARM_COMPUTE_DEBUG_ENABLED */
+#define ARM_COMPUTE_LOG(...)
+#endif /* ARM_COMPUTE_DEBUG_ENABLED */
+
+namespace arm_compute
+{
+/** Verbosity of the logger */
+enum class LoggerVerbosity
+{
+ NONE, /**< No info */
+ INFO /**< Log info */
+};
+
+/** Logger singleton class */
+class Logger
+{
+public:
+ static Logger &get();
+ void set_logger(std::ostream &ostream, LoggerVerbosity verbosity);
+ std::ostream &log_info();
+
+private:
+ /** Default constructor */
+ Logger();
+ /** Allow instances of this class to be moved */
+ Logger(Logger &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ Logger(const Logger &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ Logger &operator=(const Logger &) = delete;
+ /** Allow instances of this class to be moved */
+ Logger &operator=(Logger &&) = default;
+
+ std::ostream *_ostream;
+ std::ostream _nullstream;
+ LoggerVerbosity _verbosity;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_LOGGER_H__ */
\ No newline at end of file
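A note on the two macro expansions above: the debug form forwards its operand chain to the logger's stream, while the release form is variadic and expands to nothing, so the streamed operands are never evaluated. A self-contained sketch with hypothetical LOG_DEBUG/LOG_RELEASE macros (not part of the patch) mirroring that behaviour:

#include <sstream>

std::ostringstream &debug_stream()
{
    static std::ostringstream ss; // stands in for Logger::log_info()
    return ss;
}

#define LOG_DEBUG(x) (debug_stream() << x) // debug branch: forwards to a stream
#define LOG_RELEASE(...)                   // release branch: vanishes entirely

int main()
{
    LOG_DEBUG("shape: " << 227 << "x" << 227);   // streamed into debug_stream()
    LOG_RELEASE("shape: " << 227 << "x" << 227); // compiles to an empty statement
    return 0;
}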
diff --git a/arm_compute/graph/Graph.h b/arm_compute/graph/Graph.h
index da41548119..9d06f44bee 100644
--- a/arm_compute/graph/Graph.h
+++ b/arm_compute/graph/Graph.h
@@ -70,11 +70,7 @@ public:
* @param[in] tmp Output info to set
*/
void set_temp(TensorInfo &&tmp);
- /** Sets whether to enable information print out
- *
- * @param[in] is_enabled Set to true if need info printed out
- */
- void set_info_enablement(bool is_enabled);
+
/** Returns the graph hints that are currently used
*
* @return Graph hints
diff --git a/arm_compute/graph/INode.h b/arm_compute/graph/INode.h
index 6ce9b1b986..1b22bdf639 100644
--- a/arm_compute/graph/INode.h
+++ b/arm_compute/graph/INode.h
@@ -58,8 +58,6 @@ public:
*/
TargetHint override_target_hint(TargetHint target_hint) const;
- virtual void print_info() = 0;
-
protected:
/** Interface to be implemented that overrides the hints
*
@@ -71,8 +69,6 @@ protected:
protected:
TargetHint _target_hint{ TargetHint::DONT_CARE };
- ITensor *_input{ nullptr };
- ITensor *_output{ nullptr };
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/nodes/ActivationLayer.h b/arm_compute/graph/nodes/ActivationLayer.h
index ea32dd02a2..efe8112e77 100644
--- a/arm_compute/graph/nodes/ActivationLayer.h
+++ b/arm_compute/graph/nodes/ActivationLayer.h
@@ -45,7 +45,6 @@ public:
// Inherited methods overridden:
std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
- void print_info() override;
private:
const ActivationLayerInfo _activation_info; /**< Activation layer info */
diff --git a/arm_compute/graph/nodes/ConvolutionLayer.h b/arm_compute/graph/nodes/ConvolutionLayer.h
index 086bf03dfe..04ba3dd6b7 100644
--- a/arm_compute/graph/nodes/ConvolutionLayer.h
+++ b/arm_compute/graph/nodes/ConvolutionLayer.h
@@ -78,23 +78,26 @@ public:
// Inherited methods overridden:
std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
- void print_info() override;
private:
/** Instantiates a non-grouped convolution
*
+ * @param[in] input Input tensor
+ * @param[in] output Output tensor
* @param[in] conv_method_hint Hint that specifies which convolution layer method to use
*
* @return Convolution function
*/
- std::unique_ptr<arm_compute::IFunction> instantiate_convolution(ConvolutionMethodHint conv_method_hint);
+ std::unique_ptr<arm_compute::IFunction> instantiate_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint);
/** Instantiates a grouped convolution
*
+ * @param[in] input Input tensor
+ * @param[in] output Output tensor
* @param[in] conv_method_hint Hint that specifies which convolution layer method to use
*
* @return Grouped Convolution function
*/
- std::unique_ptr<arm_compute::IFunction> instantiate_grouped_convolution(ConvolutionMethodHint conv_method_hint);
+ std::unique_ptr<arm_compute::IFunction> instantiate_grouped_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint);
private:
unsigned int _conv_width; /**< Convolution width */
diff --git a/arm_compute/graph/nodes/FullyConnectedLayer.h b/arm_compute/graph/nodes/FullyConnectedLayer.h
index b05bc96c99..d31e060457 100644
--- a/arm_compute/graph/nodes/FullyConnectedLayer.h
+++ b/arm_compute/graph/nodes/FullyConnectedLayer.h
@@ -51,7 +51,6 @@ public:
// Inherited methods overridden:
std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
- void print_info() override;
// Inherited methods overridden:
private:
diff --git a/arm_compute/graph/nodes/NormalizationLayer.h b/arm_compute/graph/nodes/NormalizationLayer.h
index 52f67d2c31..02efd1cbeb 100644
--- a/arm_compute/graph/nodes/NormalizationLayer.h
+++ b/arm_compute/graph/nodes/NormalizationLayer.h
@@ -44,7 +44,6 @@ public:
// Inherited methods overridden:
std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
- void print_info() override;
private:
const NormalizationLayerInfo _norm_info; /**< Normalization layer information */
diff --git a/arm_compute/graph/nodes/PoolingLayer.h b/arm_compute/graph/nodes/PoolingLayer.h
index f07800a7b8..87b15d06cb 100644
--- a/arm_compute/graph/nodes/PoolingLayer.h
+++ b/arm_compute/graph/nodes/PoolingLayer.h
@@ -45,7 +45,6 @@ public:
// Inherited methods overridden:
std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
- void print_info() override;
private:
const PoolingLayerInfo _pool_info; /**< Pooling layer information */
diff --git a/arm_compute/graph/nodes/SoftmaxLayer.h b/arm_compute/graph/nodes/SoftmaxLayer.h
index 1515a0f28a..2e1bd98c8d 100644
--- a/arm_compute/graph/nodes/SoftmaxLayer.h
+++ b/arm_compute/graph/nodes/SoftmaxLayer.h
@@ -28,7 +28,6 @@
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/Types.h"
-
namespace arm_compute
{
namespace graph
@@ -39,7 +38,6 @@ class SoftmaxLayer : public INode
public:
// Inherited methods overridden:
std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
- void print_info() override;
};
} // namespace graph
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 9c736c5df1..dce7132785 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -25,6 +25,7 @@
#error "This example needs to be built with -DARM_COMPUTE_CL"
#endif /* ARM_COMPUTE_CL */
+#include "arm_compute/core/Logger.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/Nodes.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
@@ -102,7 +103,7 @@ void main_graph_alexnet(int argc, const char **argv)
}
Graph graph;
- graph.set_info_enablement(true);
+ arm_compute::Logger::get().set_logger(std::cout, arm_compute::LoggerVerbosity::INFO);
graph << hint
<< Tensor(TensorInfo(TensorShape(227U, 227U, 3U, batches), 1, DataType::F32), DummyAccessor())
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 51b0881b1b..1427abe15f 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -25,6 +25,7 @@
#error "This example needs to be built with -DARM_COMPUTE_CL"
#endif /* ARM_COMPUTE_CL */
+#include "arm_compute/core/Logger.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/Nodes.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
@@ -101,7 +102,7 @@ void main_graph_lenet(int argc, const char **argv)
}
Graph graph;
- graph.set_info_enablement(true);
+ arm_compute::Logger::get().set_logger(std::cout, arm_compute::LoggerVerbosity::INFO);
//conv1 << pool1 << conv2 << pool2 << fc1 << act1 << fc2 << smx
graph << hint
diff --git a/src/core/Logger.cpp b/src/core/Logger.cpp
new file mode 100644
index 0000000000..9c3bf263a6
--- /dev/null
+++ b/src/core/Logger.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Logger.h"
+
+using namespace arm_compute;
+
+Logger::Logger()
+ : _ostream(&std::cout), _nullstream(nullptr), _verbosity(LoggerVerbosity::NONE)
+{
+}
+
+Logger &Logger::get()
+{
+ static Logger _instance;
+ return _instance;
+}
+
+void Logger::set_logger(std::ostream &ostream, LoggerVerbosity verbosity)
+{
+ _ostream = &ostream;
+ _verbosity = verbosity;
+}
+
+std::ostream &Logger::log_info()
+{
+ if(_verbosity == LoggerVerbosity::INFO)
+ {
+ return *_ostream;
+ }
+ else
+ {
+ return _nullstream;
+ }
+}
\ No newline at end of file
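The _nullstream(nullptr) initialisation above leans on a standard-library guarantee: a std::ostream constructed with a null streambuf immediately enters a failed state, so anything inserted into it is discarded. A standalone illustration of the idiom (not part of the patch):

#include <iostream>

int main()
{
    // An ostream with a null streambuf has badbit set from construction,
    // so every insertion below is silently dropped.
    std::ostream null_stream(nullptr);
    null_stream << "never reaches any device" << std::endl;
    std::cout << "null_stream.good() = " << null_stream.good() << std::endl; // prints 0
    return 0;
}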
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 25c4577df7..7dddb1cd9a 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -48,12 +48,6 @@ public:
*/
void configure(GraphHints _next_hints);
- /** Sets whether to enable information print out
- *
- * @param[in] is_enabled Set to true if need info printed out
- */
- void set_info_enablement(bool is_enabled);
-
GraphContext _ctx{};
std::vector<Stage> _pipeline{};
std::vector<std::unique_ptr<Tensor>> _tensors{};
@@ -64,7 +58,6 @@ public:
std::unique_ptr<Tensor> _graph_output{ nullptr };
std::unique_ptr<INode> _current_node{ nullptr };
Tensor *_current_output{ nullptr };
- bool _info_enabled{ false };
private:
Tensor *_current_input{ nullptr };
@@ -161,11 +154,6 @@ void Graph::Private::configure(GraphHints _next_hints)
std::swap(_current_hints, _next_hints);
}
-void Graph::Private::set_info_enablement(bool is_enabled)
-{
- _info_enabled = is_enabled;
-}
-
void Graph::add_node(std::unique_ptr<INode> node)
{
ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_input == nullptr, "The graph's input must be set before the first node is added");
@@ -179,11 +167,6 @@ void Graph::add_node(std::unique_ptr<INode> node)
{
//Finalize the previous Node:
_pimpl->configure(_pimpl->_next_hints);
-
- if(_pimpl->_info_enabled)
- {
- _pimpl->_current_node->print_info();
- }
}
else
{
@@ -231,11 +214,6 @@ void Graph::set_temp(TensorInfo &&tmp)
_pimpl->_current_output = _pimpl->_tensors.back().get();
}
-void Graph::set_info_enablement(bool is_enabled)
-{
- _pimpl->set_info_enablement(is_enabled);
-}
-
GraphHints &Graph::hints()
{
return _pimpl->_next_hints;
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
index da2dac04e2..5cd2a0bcc2 100644
--- a/src/graph/nodes/ActivationLayer.cpp
+++ b/src/graph/nodes/ActivationLayer.cpp
@@ -23,6 +23,7 @@
*/
#include "arm_compute/graph/nodes/ActivationLayer.h"
+#include "arm_compute/core/Logger.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
@@ -71,36 +72,24 @@ std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphC
{
std::unique_ptr<arm_compute::IFunction> func;
_target_hint = ctx.hints().target_hint();
- _input = input;
- _output = output;
if(_target_hint == TargetHint::OPENCL)
{
func = instantiate<TargetHint::OPENCL>(input, output, _activation_info);
+ ARM_COMPUTE_LOG("Instantiating CLActivationLayer");
}
else
{
func = instantiate<TargetHint::NEON>(input, output, _activation_info);
+ ARM_COMPUTE_LOG("Instantiating NEActivationLayer");
}
- return func;
-}
-void ActivationLayer::print_info()
-{
- if(_target_hint == TargetHint::OPENCL)
- {
- std::cout << "Instantiating CLActivationLayer";
- }
- else
- {
- std::cout << "Instantiating NEActivationLayer";
- }
-
- std::cout << " Data Type: " << _input->info()->data_type()
- << " Input shape: " << _input->info()->tensor_shape()
- << " Output shape: " << _output->info()->tensor_shape()
- << " Activation function: " << _activation_info.activation()
- << " a: " << _activation_info.a()
- << " b: " << _activation_info.b()
- << std::endl;
+ ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Activation function: " << _activation_info.activation()
+ << " a: " << _activation_info.a()
+ << " b: " << _activation_info.b()
+ << std::endl);
+ return func;
}
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index a992095786..b47be8dc33 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -23,6 +23,7 @@
*/
#include "arm_compute/graph/nodes/ConvolutionLayer.h"
+#include "arm_compute/core/Logger.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
#include "arm_compute/runtime/IFunction.h"
@@ -184,8 +185,6 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Graph
std::unique_ptr<arm_compute::IFunction> func;
_target_hint = ctx.hints().target_hint();
- _input = input;
- _output = output;
const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
// Check if the weights and biases are loaded
@@ -197,19 +196,21 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Graph
_biases.set_target(_target_hint);
// Calculate output shape
- TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
+ TensorShape output_shape = calculate_convolution_layer_output_shape(input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
// Output auto inizialitation if not yet initialized
- arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());
+ arm_compute::auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
// Create appropriate convolution function
if(_num_groups == 1)
{
- func = instantiate_convolution(conv_method_hint);
+ func = instantiate_convolution(input, output, conv_method_hint);
+ ARM_COMPUTE_LOG("Instantiating CLConvolutionLayer");
}
else
{
- func = instantiate_grouped_convolution(conv_method_hint);
+ func = instantiate_grouped_convolution(input, output, conv_method_hint);
+ ARM_COMPUTE_LOG("Instantiating NEConvolutionLayer");
}
// Fill weights
@@ -223,49 +224,38 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Graph
_biases.allocate_and_fill_if_needed();
}
- return func;
-}
+ ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+ << " Input Shape: " << input->info()->tensor_shape()
+ << " Weights shape: " << _weights.info().tensor_shape()
+ << " Biases Shape: " << _biases.info().tensor_shape()
+ << " Output Shape: " << output->info()->tensor_shape()
+ << " PadStrideInfo: " << _conv_info
+ << " Groups: " << _num_groups
+ << " WeightsInfo: " << _weights_info
+ << std::endl);
-void ConvolutionLayer::print_info()
-{
- if(_target_hint == TargetHint::OPENCL)
- {
- std::cout << "Instantiating CLConvolutionLayer";
- }
- else
- {
- std::cout << "Instantiating NEConvolutionLayer";
- }
- std::cout << " Data Type: " << _input->info()->data_type()
- << " Input Shape: " << _input->info()->tensor_shape()
- << " Weights shape: " << _weights.info().tensor_shape()
- << " Biases Shape: " << _biases.info().tensor_shape()
- << " Output Shape: " << _output->info()->tensor_shape()
- << " PadStrideInfo: " << _conv_info
- << " Groups: " << _num_groups
- << " WeightsInfo: " << _weights_info
- << std::endl;
+ return func;
}
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
{
std::unique_ptr<arm_compute::IFunction> func;
if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<TargetHint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::OPENCL>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
}
else
{
- func = instantiate<TargetHint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::NEON>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
}
return func;
}
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ConvolutionMethodHint conv_method_hint)
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
{
// Get tensor shapes
- TensorShape input_shape = _input->info()->tensor_shape();
- TensorShape output_shape = _output->info()->tensor_shape();
+ TensorShape input_shape = input->info()->tensor_shape();
+ TensorShape output_shape = output->info()->tensor_shape();
TensorShape weights_shape = _weights.info().tensor_shape();
TensorShape biases_shape = _biases.info().tensor_shape();
@@ -309,8 +299,8 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_co
// Create sub-tensors for input, output, weights and bias
auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
- _is[i] = SubTensor(_input, input_shape, input_coord, hint_to_use);
- _os[i] = SubTensor(_output, output_shape, output_coord, hint_to_use);
+ _is[i] = SubTensor(input, input_shape, input_coord, hint_to_use);
+ _os[i] = SubTensor(output, output_shape, output_coord, hint_to_use);
_ws[i] = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
_bs[i] = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index c317660b20..6b21810a36 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -24,6 +24,7 @@
#include "arm_compute/graph/nodes/FullyConnectedLayer.h"
#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Logger.h"
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "support/ToolchainSupport.h"
@@ -112,35 +113,24 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Gr
std::unique_ptr<arm_compute::IFunction> func;
_target_hint = ctx.hints().target_hint();
- _input = input;
- _output = output;
if(_target_hint == TargetHint::OPENCL)
{
func = instantiate<TargetHint::OPENCL>(input, _weights, _biases, output);
+ ARM_COMPUTE_LOG("Instantiating CLFullyConnectedLayer");
}
else
{
func = instantiate<TargetHint::NEON>(input, _weights, _biases, output);
+ ARM_COMPUTE_LOG("Instantiating NEFullyConnectedLayer");
}
- return func;
-}
+ ARM_COMPUTE_LOG(" Type: " << input->info()->data_type()
+ << " Input Shape: " << input->info()->tensor_shape()
+ << " Weights shape: " << _weights.info().tensor_shape()
+ << " Biases Shape: " << _biases.info().tensor_shape()
+ << " Output Shape: " << output->info()->tensor_shape()
+ << std::endl);
-void FullyConnectedLayer::print_info()
-{
- if(_target_hint == TargetHint::OPENCL)
- {
- std::cout << "Instantiating CLFullyConnectedLayer";
- }
- else
- {
- std::cout << "Instantiating NEFullyConnectedLayer";
- }
- std::cout << " Type: " << _input->info()->data_type()
- << " Input Shape: " << _input->info()->tensor_shape()
- << " Weights shape: " << _weights.info().tensor_shape()
- << " Biases Shape: " << _biases.info().tensor_shape()
- << " Output Shape: " << _output->info()->tensor_shape()
- << std::endl;
+ return func;
}
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
index 99d07dc8da..47f0891dfb 100644
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ b/src/graph/nodes/NormalizationLayer.cpp
@@ -23,6 +23,7 @@
*/
#include "arm_compute/graph/nodes/NormalizationLayer.h"
+#include "arm_compute/core/Logger.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
@@ -71,35 +72,23 @@ std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Gra
{
std::unique_ptr<arm_compute::IFunction> func;
_target_hint = ctx.hints().target_hint();
- _input = input;
- _output = output;
if(_target_hint == TargetHint::OPENCL)
{
func = instantiate<TargetHint::OPENCL>(input, output, _norm_info);
+ ARM_COMPUTE_LOG("Instantiating CLNormalizationLayer");
}
else
{
func = instantiate<TargetHint::NEON>(input, output, _norm_info);
+ ARM_COMPUTE_LOG("Instantiating NENormalizationLayer");
}
- return func;
-}
+ ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Normalization info: " << _norm_info
+ << std::endl);
-void NormalizationLayer::print_info()
-{
- if(_target_hint == TargetHint::OPENCL)
- {
- std::cout << "Instantiating CLNormalizationLayer";
- }
- else
- {
- std::cout << "Instantiating NENormalizationLayer";
- }
-
- std::cout << " Data Type: " << _input->info()->data_type()
- << " Input shape: " << _input->info()->tensor_shape()
- << " Output shape: " << _output->info()->tensor_shape()
- << " Normalization info: " << _norm_info
- << std::endl;
+ return func;
}
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
index 2a5e4cb3d8..317cf4d14f 100644
--- a/src/graph/nodes/PoolingLayer.cpp
+++ b/src/graph/nodes/PoolingLayer.cpp
@@ -23,6 +23,7 @@
*/
#include "arm_compute/graph/nodes/PoolingLayer.h"
+#include "arm_compute/core/Logger.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
@@ -71,34 +72,22 @@ std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphCont
{
std::unique_ptr<arm_compute::IFunction> func;
_target_hint = ctx.hints().target_hint();
- _input = input;
- _output = output;
if(_target_hint == TargetHint::OPENCL)
{
func = instantiate<TargetHint::OPENCL>(input, output, _pool_info);
+ ARM_COMPUTE_LOG("Instantiating CLPoolingLayer");
}
else
{
func = instantiate<TargetHint::NEON>(input, output, _pool_info);
+ ARM_COMPUTE_LOG("Instantiating NEPoolingLayer");
}
- return func;
-}
+ ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Pooling info: " << _pool_info << std::endl);
-void PoolingLayer::print_info()
-{
- if(_target_hint == TargetHint::OPENCL)
- {
- std::cout << "Instantiating CLPoolingLayer";
- }
- else
- {
- std::cout << "Instantiating NEPoolingLayer";
- }
-
- std::cout << " Data Type: " << _input->info()->data_type()
- << " Input shape: " << _input->info()->tensor_shape()
- << " Output shape: " << _output->info()->tensor_shape()
- << " Pooling info: " << _pool_info << std::endl;
+ return func;
}
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
index 9e798ef7cc..86282448f7 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -23,6 +23,7 @@
*/
#include "arm_compute/graph/nodes/SoftmaxLayer.h"
+#include "arm_compute/core/Logger.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
@@ -65,33 +66,22 @@ std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphCont
{
std::unique_ptr<arm_compute::IFunction> func;
_target_hint = ctx.hints().target_hint();
- _input = input;
- _output = output;
if(_target_hint == TargetHint::OPENCL)
{
func = instantiate<TargetHint::OPENCL>(input, output);
+ ARM_COMPUTE_LOG("Instantiating CLSoftmaxLayer");
}
else
{
func = instantiate<TargetHint::NEON>(input, output);
+ ARM_COMPUTE_LOG("Instantiating NESoftmaxLayer");
}
- return func;
-}
+ ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << std::endl);
-void SoftmaxLayer::print_info()
-{
- if(_target_hint == TargetHint::OPENCL)
- {
- std::cout << "Instantiating CLSoftmaxLayer";
- }
- else
- {
- std::cout << "Instantiating NESoftmaxLayer";
- }
- std::cout << " Data Type: " << _input->info()->data_type()
- << " Input shape: " << _input->info()->tensor_shape()
- << " Output shape: " << _output->info()->tensor_shape()
- << std::endl;
+ return func;
}
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index f0b0dded18..bdd831075d 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -274,4 +274,4 @@ bool NumPyBinLoader::access_tensor(ITensor &tensor)
});
}
return true;
-}
+}
\ No newline at end of file
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index c8cbb00237..5c370e5eba 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -119,7 +119,7 @@ public:
private:
const std::string _filename;
};
-} // namespace graph
+} // namespace graph_utils
} // namespace arm_compute
#endif /* __ARM_COMPUTE_GRAPH_UTILS_H__ */