author    Georgios Pinitas <georgios.pinitas@arm.com>  2017-10-04 16:53:58 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    ff421f2100e0e9e532f5fe78585300546af61690 (patch)
tree      9ba5a1bfe64b5b10f70c64a965f9c5ca14de9ce3
parent    925ca0f7402115da3bffb21c04fca0bc822c9b38 (diff)
download  ComputeLibrary-ff421f2100e0e9e532f5fe78585300546af61690.tar.gz
COMPMID-601: Add GraphContext
GraphContext holds all the information about the hints that need to be passed to the nodes. As these might expand, it serves as a centralized class for such information.

Change-Id: I0b5527630fb97cc5fa500db0bac8307ff2ea36e6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90300
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
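A minimal usage sketch (illustrative only, not part of the patch) of the hint streaming this change introduces, mirroring the updated examples/graph_alexnet.cpp below:

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/Types.h"

using namespace arm_compute::graph;

Graph graph;
graph << TargetHint::OPENCL            // target hint for the nodes streamed in afterwards
      << ConvolutionMethodHint::DIRECT // picked up from the GraphContext by convolution nodes
      /* << ... layer nodes ... */;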
-rw-r--r--  arm_compute/graph/Graph.h                      30
-rw-r--r--  arm_compute/graph/GraphContext.h               88
-rw-r--r--  arm_compute/graph/INode.h                      27
-rw-r--r--  arm_compute/graph/SubTensor.h                   6
-rw-r--r--  arm_compute/graph/Tensor.h                     12
-rw-r--r--  arm_compute/graph/Types.h                      2
-rw-r--r--  arm_compute/graph/nodes/ActivationLayer.h      5
-rw-r--r--  arm_compute/graph/nodes/ConvolutionLayer.h     3
-rw-r--r--  arm_compute/graph/nodes/FullyConnectedLayer.h  5
-rw-r--r--  arm_compute/graph/nodes/NormalizationLayer.h   3
-rw-r--r--  arm_compute/graph/nodes/PoolingLayer.h         5
-rw-r--r--  arm_compute/graph/nodes/SoftmaxLayer.h         3
-rw-r--r--  examples/graph_alexnet.cpp                     5
-rw-r--r--  examples/graph_lenet.cpp                       4
-rw-r--r--  src/graph/Graph.cpp                            87
-rw-r--r--  src/graph/GraphContext.cpp                     66
-rw-r--r--  src/graph/INode.cpp                            20
-rw-r--r--  src/graph/SubTensor.cpp                        15
-rw-r--r--  src/graph/Tensor.cpp                           19
-rw-r--r--  src/graph/nodes/ActivationLayer.cpp            28
-rw-r--r--  src/graph/nodes/ConvolutionLayer.cpp           58
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp        32
-rw-r--r--  src/graph/nodes/NormalizationLayer.cpp         28
-rw-r--r--  src/graph/nodes/PoolingLayer.cpp               28
-rw-r--r--  src/graph/nodes/SoftmaxLayer.cpp               28
25 files changed, 393 insertions, 214 deletions
diff --git a/arm_compute/graph/Graph.h b/arm_compute/graph/Graph.h
index 3c263c2bdd..da41548119 100644
--- a/arm_compute/graph/Graph.h
+++ b/arm_compute/graph/Graph.h
@@ -65,25 +65,21 @@ public:
* @param[in] tensor Tensor to add
*/
void add_tensor(std::unique_ptr<Tensor> tensor);
- /** Sets an execution hint to the graph
- *
- * @note Hint is propagated to the following node and as per name
- * its just a hint/preference to be considered by the graph executor
- *
- * @param[in] hint execution hint
- */
- void set_hint(Hint hint);
/** Manually sets the output of the current node
*
* @param[in] tmp Output info to set
*/
void set_temp(TensorInfo &&tmp);
-
/** Sets whether to enable information print out
*
* @param[in] is_enabled Set to true if need info printed out
*/
void set_info_enablement(bool is_enabled);
+ /** Returns the graph hints that are currently used
+ *
+ * @return Graph hints
+ */
+ GraphHints &hints();
private:
class Private;
@@ -106,14 +102,22 @@ Graph &operator<<(Graph &graph, TensorInfo &&info);
* @return Updated graph
*/
Graph &operator<<(Graph &graph, Tensor &&tensor);
-/** Overloaded stream operator to provide an execution hint to the graph
+/** Overloaded stream operator to provide a target hint to the graph
+ *
+ * @param[in, out] graph Graph to provide the hint to
+ * @param[in] target_hint Target hint to be considered
+ *
+ * @return Updated graph
+ */
+Graph &operator<<(Graph &graph, TargetHint target_hint);
+/** Overloaded stream operator to provide a convolution method hint to the graph
*
- * @param[in, out] graph Graph to provide the hint to
- * @param[in] hint Execution hint to be considered
+ * @param[in, out] graph Graph to provide the hint to
+ * @param[in] conv_method_hint Convolution method hint to be considered
*
* @return Updated graph
*/
-Graph &operator<<(Graph &graph, Hint hint);
+Graph &operator<<(Graph &graph, ConvolutionMethodHint conv_method_hint);
/** Overloaded stream operator to add a node to the graph
*
* @param[in, out] graph Graph to add the tensor
diff --git a/arm_compute/graph/GraphContext.h b/arm_compute/graph/GraphContext.h
new file mode 100644
index 0000000000..98bc8c02f8
--- /dev/null
+++ b/arm_compute/graph/GraphContext.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_CONTEXT_H__
+#define __ARM_COMPUTE_GRAPH_CONTEXT_H__
+
+#include "arm_compute/graph/Types.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Hints that can be passed to the graph to expose parameterization */
+class GraphHints
+{
+public:
+ /** Default Constructor */
+ GraphHints(TargetHint target_hint = TargetHint::DONT_CARE,
+ ConvolutionMethodHint conv_method_hint = ConvolutionMethodHint::GEMM);
+ /** Sets target execution hint
+ *
+ * @param target_hint Target execution hint
+ */
+ void set_target_hint(TargetHint target_hint);
+ /** Sets convolution method to use
+ *
+ * @param convolution_method Convolution method to use
+ */
+ void set_convolution_method_hint(ConvolutionMethodHint convolution_method);
+ /** Returns target execution hint
+ *
+ * @return target execution hint
+ */
+ TargetHint target_hint() const;
+ /** Returns convolution method hint
+ *
+ * @return convolution method hint
+ */
+ ConvolutionMethodHint convolution_method_hint() const;
+
+private:
+ TargetHint _target_hint; /**< Target execution hint */
+ ConvolutionMethodHint _convolution_method_hint; /**< Convolution method hint */
+};
+
+/** Graph context */
+class GraphContext
+{
+public:
+ /** Default Constructor */
+ GraphContext();
+ /** Returns graph hints
+ *
+ * @return Graph hints
+ */
+ GraphHints &hints();
+ /** Returns graph hints
+ *
+ * @return Graph hints
+ */
+ const GraphHints &hints() const;
+
+private:
+ GraphHints _hints; /**< Graph hints */
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_CONTEXT_H__ */
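A short sketch (not part of the patch) of how the new context is meant to be used, assuming only the header above; the defaults come from the GraphHints constructor:

#include "arm_compute/graph/GraphContext.h"

using namespace arm_compute::graph;

GraphContext ctx;
// Constructor defaults: TargetHint::DONT_CARE and ConvolutionMethodHint::GEMM
ctx.hints().set_target_hint(TargetHint::NEON);
ctx.hints().set_convolution_method_hint(ConvolutionMethodHint::DIRECT);
TargetHint t = ctx.hints().target_hint(); // TargetHint::NEON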
diff --git a/arm_compute/graph/INode.h b/arm_compute/graph/INode.h
index 13b5d05f87..6ce9b1b986 100644
--- a/arm_compute/graph/INode.h
+++ b/arm_compute/graph/INode.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_GRAPH_INODE_H__
#define __ARM_COMPUTE_GRAPH_INODE_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/runtime/IFunction.h"
@@ -41,37 +42,37 @@ public:
virtual ~INode() = default;
/** Interface to be implemented that instantiates the node
*
- * @param[in] hint Hint to where the node should be executed
+ * @param[in] ctx Graph context to be used
* @param[in] input Input tensor of the node
* @param[in] output Output tensor of the node
*/
- virtual std::unique_ptr<arm_compute::IFunction> instantiate_node(Hint hint, ITensor *input, ITensor *output) = 0;
- /** Override the existing hint
+ virtual std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) = 0;
+ /** Override the existing target hint
*
* @note If the input is DONT_CARE then the method has to pick a technology,
* else it can accept the hint or override it (But not with DONT_CARE)
*
- * @param[in] hint Hint to be considered
+ * @param[in] target_hint Target hint to be considered
*
- * @return The updated hint
+ * @return The updated target hint
*/
- Hint override_hint(Hint hint) const;
+ TargetHint override_target_hint(TargetHint target_hint) const;
virtual void print_info() = 0;
protected:
- /** Interface to be implement that override the hint
+ /** Interface to be implemented that overrides the hints
*
- * @param[in] hint Hint to be considered
+ * @param[in] hints Hints to be considered
*
- * @return The updated hint
+ * @return The updated hints
*/
- virtual Hint node_override_hint(Hint hint) const;
+ virtual GraphHints node_override_hints(GraphHints hints) const;
protected:
- Hint _hint{ Hint::DONT_CARE };
- ITensor *_input{ nullptr };
- ITensor *_output{ nullptr };
+ TargetHint _target_hint{ TargetHint::DONT_CARE };
+ ITensor *_input{ nullptr };
+ ITensor *_output{ nullptr };
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/SubTensor.h b/arm_compute/graph/SubTensor.h
index a73b0d6b0e..ace93d20a3 100644
--- a/arm_compute/graph/SubTensor.h
+++ b/arm_compute/graph/SubTensor.h
@@ -55,7 +55,7 @@ public:
* @param[in] coords Starting coordinates of the sub-tensor in the parent tensor
* @param[in] target Execution target
*/
- SubTensor(ITensor *parent, TensorShape tensor_shape, Coordinates coords, Hint target);
+ SubTensor(ITensor *parent, TensorShape tensor_shape, Coordinates coords, TargetHint target);
/** Prevent instances of this class from being copied (As this class contains pointers) */
SubTensor(const SubTensor &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
@@ -86,14 +86,14 @@ public:
*
* @return Target of the tensor
*/
- Hint target() const;
+ TargetHint target() const;
private:
/** Instantiates a sub-tensor */
void instantiate_subtensor();
private:
- Hint _target; /**< Target that this tensor is pinned on */
+ TargetHint _target; /**< Target that this tensor is pinned on */
Coordinates _coords; /**< SubTensor Coordinates */
SubTensorInfo _info; /**< SubTensor metadata */
ITensor *_parent; /**< Parent tensor */
diff --git a/arm_compute/graph/Tensor.h b/arm_compute/graph/Tensor.h
index 0e823ffad0..9fdd56db6e 100644
--- a/arm_compute/graph/Tensor.h
+++ b/arm_compute/graph/Tensor.h
@@ -49,7 +49,7 @@ public:
*/
template <typename AccessorType>
Tensor(std::unique_ptr<AccessorType> accessor)
- : _target(Hint::DONT_CARE), _info(), _accessor(std::move(accessor)), _tensor(nullptr)
+ : _target(TargetHint::DONT_CARE), _info(), _accessor(std::move(accessor)), _tensor(nullptr)
{
}
/** Constructor
@@ -58,7 +58,7 @@ public:
*/
template <typename AccessorType>
Tensor(AccessorType &&accessor)
- : _target(Hint::DONT_CARE), _info(), _accessor(arm_compute::support::cpp14::make_unique<AccessorType>(std::forward<AccessorType>(accessor))), _tensor(nullptr)
+ : _target(TargetHint::DONT_CARE), _info(), _accessor(arm_compute::support::cpp14::make_unique<AccessorType>(std::forward<AccessorType>(accessor))), _tensor(nullptr)
{
}
/** Constructor
@@ -68,7 +68,7 @@ public:
*/
template <typename AccessorType>
Tensor(TensorInfo &&info, AccessorType &&accessor)
- : _target(Hint::DONT_CARE), _info(info), _accessor(arm_compute::support::cpp14::make_unique<AccessorType>(std::forward<AccessorType>(accessor))), _tensor(nullptr)
+ : _target(TargetHint::DONT_CARE), _info(info), _accessor(arm_compute::support::cpp14::make_unique<AccessorType>(std::forward<AccessorType>(accessor))), _tensor(nullptr)
{
}
/** Default Destructor */
@@ -95,7 +95,7 @@ public:
*
* @return
*/
- ITensor *set_target(Hint target);
+ ITensor *set_target(TargetHint target);
/** Returns tensor's TensorInfo
*
* @return TensorInfo of the tensor
@@ -114,10 +114,10 @@ public:
*
* @return Target of the tensor
*/
- Hint target() const;
+ TargetHint target() const;
private:
- Hint _target; /**< Target that this tensor is pinned on */
+ TargetHint _target; /**< Target that this tensor is pinned on */
TensorInfo _info; /**< Tensor metadata */
std::unique_ptr<ITensorAccessor> _accessor; /**< Tensor Accessor */
std::unique_ptr<ITensor> _tensor; /**< Tensor */
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 538d64e9bb..e48ff84abf 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -47,7 +47,7 @@ using arm_compute::PoolingLayerInfo;
using arm_compute::PoolingType;
/**< Execution hint to the graph executor */
-enum class Hint
+enum class TargetHint
{
DONT_CARE, /**< Run node in any device */
OPENCL, /**< Run node on an OpenCL capable device (GPU) */
diff --git a/arm_compute/graph/nodes/ActivationLayer.h b/arm_compute/graph/nodes/ActivationLayer.h
index c23674e7b6..ea32dd02a2 100644
--- a/arm_compute/graph/nodes/ActivationLayer.h
+++ b/arm_compute/graph/nodes/ActivationLayer.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_GRAPH_ACTIVATION_LAYER_H__
#define __ARM_COMPUTE_GRAPH_ACTIVATION_LAYER_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/Types.h"
@@ -33,7 +34,7 @@ namespace arm_compute
namespace graph
{
/** Activation Layer node */
-class ActivationLayer : public INode
+class ActivationLayer final : public INode
{
public:
/** Default Constructor
@@ -43,7 +44,7 @@ public:
ActivationLayer(const ActivationLayerInfo activation_info);
// Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> instantiate_node(Hint hint, ITensor *input, ITensor *output) override;
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
void print_info() override;
private:
diff --git a/arm_compute/graph/nodes/ConvolutionLayer.h b/arm_compute/graph/nodes/ConvolutionLayer.h
index fcd097bdaa..086bf03dfe 100644
--- a/arm_compute/graph/nodes/ConvolutionLayer.h
+++ b/arm_compute/graph/nodes/ConvolutionLayer.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_GRAPH_CONVOLUTION_LAYER_H__
#define __ARM_COMPUTE_GRAPH_CONVOLUTION_LAYER_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/SubTensor.h"
#include "arm_compute/graph/Tensor.h"
@@ -76,7 +77,7 @@ public:
}
// Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> instantiate_node(Hint hint, ITensor *input, ITensor *output) override;
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
void print_info() override;
private:
diff --git a/arm_compute/graph/nodes/FullyConnectedLayer.h b/arm_compute/graph/nodes/FullyConnectedLayer.h
index 3e1fe23b11..b05bc96c99 100644
--- a/arm_compute/graph/nodes/FullyConnectedLayer.h
+++ b/arm_compute/graph/nodes/FullyConnectedLayer.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_GRAPH_FULLY_CONNECTED_LAYER_H__
#define __ARM_COMPUTE_GRAPH_FULLY_CONNECTED_LAYER_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/Types.h"
@@ -33,7 +34,7 @@ namespace arm_compute
namespace graph
{
/** Fully connected layer node */
-class FullyConnectedLayer : public INode
+class FullyConnectedLayer final : public INode
{
public:
/** Default constructor
@@ -49,7 +50,7 @@ public:
}
// Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> instantiate_node(Hint hint, ITensor *input, ITensor *output) override;
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
void print_info() override;
// Inherited methods overriden:
diff --git a/arm_compute/graph/nodes/NormalizationLayer.h b/arm_compute/graph/nodes/NormalizationLayer.h
index 40b9c2b467..52f67d2c31 100644
--- a/arm_compute/graph/nodes/NormalizationLayer.h
+++ b/arm_compute/graph/nodes/NormalizationLayer.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_GRAPH_NORMALIZATION_LAYER_H__
#define __ARM_COMPUTE_GRAPH_NORMALIZATION_LAYER_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/Types.h"
@@ -42,7 +43,7 @@ public:
explicit NormalizationLayer(const NormalizationLayerInfo norm_info);
// Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> instantiate_node(Hint hint, ITensor *input, ITensor *output) override;
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
void print_info() override;
private:
diff --git a/arm_compute/graph/nodes/PoolingLayer.h b/arm_compute/graph/nodes/PoolingLayer.h
index 14e2c6d264..f07800a7b8 100644
--- a/arm_compute/graph/nodes/PoolingLayer.h
+++ b/arm_compute/graph/nodes/PoolingLayer.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_GRAPH_POOLING_LAYER_H__
#define __ARM_COMPUTE_GRAPH_POOLING_LAYER_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/Types.h"
@@ -33,7 +34,7 @@ namespace arm_compute
namespace graph
{
/** Pooling layer node */
-class PoolingLayer : public INode
+class PoolingLayer final : public INode
{
public:
/** Default Constructor
@@ -43,7 +44,7 @@ public:
PoolingLayer(const PoolingLayerInfo pool_info);
// Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> instantiate_node(Hint hint, ITensor *input, ITensor *output) override;
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
void print_info() override;
private:
diff --git a/arm_compute/graph/nodes/SoftmaxLayer.h b/arm_compute/graph/nodes/SoftmaxLayer.h
index 1779adae66..1515a0f28a 100644
--- a/arm_compute/graph/nodes/SoftmaxLayer.h
+++ b/arm_compute/graph/nodes/SoftmaxLayer.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_GRAPH_SOFTMAX_LAYER_H__
#define __ARM_COMPUTE_GRAPH_SOFTMAX_LAYER_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/Types.h"
@@ -37,7 +38,7 @@ class SoftmaxLayer : public INode
{
public:
// Inherited methods overriden:
- std::unique_ptr<arm_compute::IFunction> instantiate_node(Hint hint, ITensor *input, ITensor *output) override;
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) override;
void print_info() override;
};
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index cf5f635d33..9c736c5df1 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -94,11 +94,11 @@ void main_graph_alexnet(int argc, const char **argv)
}
// Check if OpenCL is available and initialize the scheduler
- Hint hint = Hint::NEON;
+ TargetHint hint = TargetHint::NEON;
if(arm_compute::opencl_is_available())
{
arm_compute::CLScheduler::get().default_init();
- hint = Hint::OPENCL;
+ hint = TargetHint::OPENCL;
}
Graph graph;
@@ -116,6 +116,7 @@ void main_graph_alexnet(int argc, const char **argv)
<< NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f))
<< PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)))
// Layer 2
+ << ConvolutionMethodHint::DIRECT
<< ConvolutionLayer(
5U, 5U, 256U,
get_accessor(data_path, "/cnn_data/alexnet_model/conv2_w.npy"),
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 1f04ff8a50..51b0881b1b 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -93,11 +93,11 @@ void main_graph_lenet(int argc, const char **argv)
}
// Check if OpenCL is available and initialize the scheduler
- Hint hint = Hint::NEON;
+ TargetHint hint = TargetHint::NEON;
if(arm_compute::opencl_is_available())
{
arm_compute::CLScheduler::get().default_init();
- hint = Hint::OPENCL;
+ hint = TargetHint::OPENCL;
}
Graph graph;
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 525506f316..25c4577df7 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -46,7 +46,7 @@ public:
*
* @param _next_hint Device execution hint
*/
- void configure(Hint _next_hint);
+ void configure(GraphHints _next_hints);
/** Sets whether to enable information print out
*
@@ -54,11 +54,12 @@ public:
*/
void set_info_enablement(bool is_enabled);
+ GraphContext _ctx{};
std::vector<Stage> _pipeline{};
std::vector<std::unique_ptr<Tensor>> _tensors{};
std::vector<std::unique_ptr<INode>> _nodes{};
- Hint _current_hint{ Hint::DONT_CARE };
- Hint _next_hint{ Hint::DONT_CARE };
+ GraphHints _current_hints{};
+ GraphHints _next_hints{};
std::unique_ptr<Tensor> _graph_input{ nullptr };
std::unique_ptr<Tensor> _graph_output{ nullptr };
std::unique_ptr<INode> _current_node{ nullptr };
@@ -66,8 +67,8 @@ public:
bool _info_enabled{ false };
private:
- Tensor *_current_input{ nullptr };
- Hint _previous_hint{ Hint::DONT_CARE };
+ Tensor *_current_input{ nullptr };
+ GraphHints _previous_hints{};
};
Graph::~Graph() //NOLINT
@@ -102,7 +103,7 @@ void Graph::run()
}
//Finalize current node's configuration
-void Graph::Private::configure(Hint _next_hint)
+void Graph::Private::configure(GraphHints _next_hints)
{
ARM_COMPUTE_ERROR_ON(_current_node == nullptr);
ARM_COMPUTE_ERROR_ON(_graph_input == nullptr);
@@ -110,9 +111,9 @@ void Graph::Private::configure(Hint _next_hint)
// Is it the first node of the graph ?
if(_current_input == nullptr)
{
- _graph_input->set_target(_current_hint);
- _current_input = _graph_input.get();
- _previous_hint = _current_hint; // For the first node just assume the previous node was of the same type as this one
+ _graph_input->set_target(_current_hints.target_hint());
+ _current_input = _graph_input.get();
+ _previous_hints = _current_hints; // For the first node just assume the previous node was of the same type as this one
}
//Automatic output configuration ?
@@ -123,29 +124,31 @@ void Graph::Private::configure(Hint _next_hint)
}
// If either the writer or reader node needs OpenCL then use OpenCL memory:
- if((_next_hint == Hint::OPENCL || _current_hint == Hint::OPENCL))
+ if((_next_hints.target_hint() == TargetHint::OPENCL || _current_hints.target_hint() == TargetHint::OPENCL))
{
- _current_output->set_target(Hint::OPENCL);
+ _current_output->set_target(TargetHint::OPENCL);
}
else
{
- _current_output->set_target(Hint::NEON);
+ _current_output->set_target(TargetHint::NEON);
}
- // Map input if needed
- std::unique_ptr<arm_compute::IFunction> func = _current_node->instantiate_node(_current_hint, _current_input->tensor(), _current_output->tensor());
+ // Update ctx and instantiate node
+ _ctx.hints() = _current_hints;
+ std::unique_ptr<arm_compute::IFunction> func = _current_node->instantiate_node(_ctx, _current_input->tensor(), _current_output->tensor());
_current_input->allocate();
- if(_current_input->target() == Hint::OPENCL)
+ // Map input if needed
+ if(_current_input->target() == TargetHint::OPENCL)
{
- if(_previous_hint == Hint::NEON)
+ if(_previous_hints.target_hint() == TargetHint::NEON)
{
- ARM_COMPUTE_ERROR_ON(_current_hint == Hint::NEON);
+ ARM_COMPUTE_ERROR_ON(_current_hints.target_hint() == TargetHint::NEON);
_pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLUnmap>(_current_input) });
}
- if(_current_hint == Hint::NEON)
+ if(_current_hints.target_hint() == TargetHint::NEON)
{
- ARM_COMPUTE_ERROR_ON(_previous_hint == Hint::NEON);
+ ARM_COMPUTE_ERROR_ON(_previous_hints.target_hint() == TargetHint::NEON);
_pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLMap>(_current_input, true) });
}
}
@@ -154,8 +157,8 @@ void Graph::Private::configure(Hint _next_hint)
_current_input = _current_output;
_current_output = nullptr;
- _previous_hint = _current_hint;
- _current_hint = _next_hint;
+ std::swap(_previous_hints, _current_hints);
+ std::swap(_current_hints, _next_hints);
}
void Graph::Private::set_info_enablement(bool is_enabled)
@@ -169,12 +172,13 @@ void Graph::add_node(std::unique_ptr<INode> node)
ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_output != nullptr, "Nothing can be added after the output tensor");
//Trigger the creation of the current Node:
- Hint _next_hint = node->override_hint(_pimpl->_next_hint);
- ARM_COMPUTE_ERROR_ON(_next_hint == Hint::DONT_CARE);
+ GraphHints _next_hints = _pimpl->_next_hints;
+ _next_hints.set_target_hint(node->override_target_hint(_pimpl->_next_hints.target_hint()));
+ ARM_COMPUTE_ERROR_ON(_next_hints.target_hint() == TargetHint::DONT_CARE);
if(_pimpl->_current_node)
{
//Finalize the previous Node:
- _pimpl->configure(_pimpl->_next_hint);
+ _pimpl->configure(_pimpl->_next_hints);
if(_pimpl->_info_enabled)
{
@@ -183,8 +187,8 @@ void Graph::add_node(std::unique_ptr<INode> node)
}
else
{
- // If that's the first node then use the same Hint before and after the node.
- _pimpl->_current_hint = _next_hint;
+ // If that's the first node then use the same TargetHint before and after the node.
+ _pimpl->_current_hints = _next_hints;
}
if(_pimpl->_current_node)
{
@@ -192,15 +196,6 @@ void Graph::add_node(std::unique_ptr<INode> node)
}
_pimpl->_current_node = std::move(node);
}
-void Graph::set_hint(Hint hint)
-{
- _pimpl->_next_hint = hint;
-}
-
-void Graph::set_info_enablement(bool is_enabled)
-{
- _pimpl->set_info_enablement(is_enabled);
-}
//Add a tensor with an Accessor (i.e either the input or output of the graph)
void Graph::add_tensor(std::unique_ptr<Tensor> tensor)
@@ -221,7 +216,7 @@ void Graph::add_tensor(std::unique_ptr<Tensor> tensor)
_pimpl->_current_output = _pimpl->_graph_output.get();
// Finalize the graph by configuring the last Node of the graph:
- _pimpl->configure(_pimpl->_current_hint); // Ignore _next_hint as this is the last node, and just use the same hint as before this node.
+ _pimpl->configure(_pimpl->_current_hints); // Ignore _next_hint as this is the last node, and just use the same hint as before this node.
_pimpl->_graph_output->allocate();
}
}
@@ -236,6 +231,16 @@ void Graph::set_temp(TensorInfo &&tmp)
_pimpl->_current_output = _pimpl->_tensors.back().get();
}
+void Graph::set_info_enablement(bool is_enabled)
+{
+ _pimpl->set_info_enablement(is_enabled);
+}
+
+GraphHints &Graph::hints()
+{
+ return _pimpl->_next_hints;
+}
+
Graph &arm_compute::graph::operator<<(Graph &graph, TensorInfo &&info)
{
graph.set_temp(std::move(info));
@@ -248,8 +253,14 @@ Graph &arm_compute::graph::operator<<(Graph &graph, Tensor &&tensor)
return graph;
}
-Graph &arm_compute::graph::operator<<(Graph &graph, Hint hint)
+Graph &arm_compute::graph::operator<<(Graph &graph, TargetHint target_hint)
+{
+ graph.hints().set_target_hint(target_hint);
+ return graph;
+}
+
+Graph &arm_compute::graph::operator<<(Graph &graph, ConvolutionMethodHint conv_method_hint)
{
- graph.set_hint(hint);
+ graph.hints().set_convolution_method_hint(conv_method_hint);
return graph;
}
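The rule Graph::Private::configure() uses above to place intermediate tensors reduces to the following stand-alone sketch (the enum and function names are simplified stand-ins, not library code):

enum class TargetHint { DONT_CARE, OPENCL, NEON };

// If either the writer (current node) or the reader (next node) runs on
// OpenCL, keep the intermediate tensor in OpenCL memory; otherwise use NEON.
TargetHint output_target(TargetHint current, TargetHint next)
{
    return (current == TargetHint::OPENCL || next == TargetHint::OPENCL)
               ? TargetHint::OPENCL
               : TargetHint::NEON;
}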
diff --git a/src/graph/GraphContext.cpp b/src/graph/GraphContext.cpp
new file mode 100644
index 0000000000..bfc6fcdfca
--- /dev/null
+++ b/src/graph/GraphContext.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/GraphContext.h"
+
+using namespace arm_compute::graph;
+
+GraphHints::GraphHints(TargetHint target_hint, ConvolutionMethodHint conv_method_hint)
+ : _target_hint(target_hint), _convolution_method_hint(conv_method_hint)
+{
+}
+
+void GraphHints::set_target_hint(TargetHint target_hint)
+{
+ _target_hint = target_hint;
+}
+
+void GraphHints::set_convolution_method_hint(ConvolutionMethodHint convolution_method)
+{
+ _convolution_method_hint = convolution_method;
+}
+
+TargetHint GraphHints::target_hint() const
+{
+ return _target_hint;
+}
+
+ConvolutionMethodHint GraphHints::convolution_method_hint() const
+{
+ return _convolution_method_hint;
+}
+
+GraphContext::GraphContext()
+ : _hints()
+{
+}
+
+GraphHints &GraphContext::hints()
+{
+ return _hints;
+}
+
+const GraphHints &GraphContext::hints() const
+{
+ return _hints;
+}
\ No newline at end of file
diff --git a/src/graph/INode.cpp b/src/graph/INode.cpp
index 6b25022d15..4b383f562b 100644
--- a/src/graph/INode.cpp
+++ b/src/graph/INode.cpp
@@ -21,7 +21,6 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
#include "arm_compute/graph/INode.h"
#include "arm_compute/core/CL/OpenCL.h"
@@ -31,17 +30,20 @@
using namespace arm_compute::graph;
-Hint INode::override_hint(Hint hint) const
+TargetHint INode::override_target_hint(TargetHint target_hint) const
{
- if(hint == Hint::OPENCL && !opencl_is_available())
+ if(target_hint == TargetHint::OPENCL && !opencl_is_available())
{
- hint = Hint::DONT_CARE;
+ target_hint = TargetHint::DONT_CARE;
}
- hint = node_override_hint(hint);
- ARM_COMPUTE_ERROR_ON(hint == Hint::OPENCL && !opencl_is_available());
- return hint;
+ GraphHints hints{ target_hint };
+ target_hint = node_override_hints(hints).target_hint();
+ ARM_COMPUTE_ERROR_ON(target_hint == TargetHint::OPENCL && !opencl_is_available());
+ return target_hint;
}
-Hint INode::node_override_hint(Hint hint) const
+GraphHints INode::node_override_hints(GraphHints hints) const
{
- return hint == Hint::DONT_CARE ? Hint::NEON : hint;
+ TargetHint target_hint = hints.target_hint();
+ hints.set_target_hint((target_hint == TargetHint::DONT_CARE) ? TargetHint::NEON : target_hint);
+ return hints;
}
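The override flow above amounts to this stand-alone sketch (TargetHint and the opencl_is_available() declaration are simplified stand-ins for the library's own definitions):

enum class TargetHint { DONT_CARE, OPENCL, NEON };
bool opencl_is_available(); // the real check lives in arm_compute/core/CL/OpenCL.h

TargetHint resolve(TargetHint requested)
{
    // An OpenCL request degrades to DONT_CARE when no CL runtime is available...
    if(requested == TargetHint::OPENCL && !opencl_is_available())
    {
        requested = TargetHint::DONT_CARE;
    }
    // ...and DONT_CARE defaults to NEON, as in node_override_hints().
    return (requested == TargetHint::DONT_CARE) ? TargetHint::NEON : requested;
}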
diff --git a/src/graph/SubTensor.cpp b/src/graph/SubTensor.cpp
index a70f32927b..abf8506c33 100644
--- a/src/graph/SubTensor.cpp
+++ b/src/graph/SubTensor.cpp
@@ -21,7 +21,6 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
#include "arm_compute/graph/SubTensor.h"
#include "arm_compute/core/Error.h"
@@ -45,12 +44,12 @@ std::unique_ptr<ITensor> initialise_subtensor(ITensor *parent, TensorShape shape
} // namespace
SubTensor::SubTensor()
- : _target(Hint::DONT_CARE), _coords(), _info(), _parent(nullptr), _subtensor(nullptr)
+ : _target(TargetHint::DONT_CARE), _coords(), _info(), _parent(nullptr), _subtensor(nullptr)
{
}
SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coords)
- : _target(Hint::DONT_CARE), _coords(coords), _info(), _parent(nullptr), _subtensor(nullptr)
+ : _target(TargetHint::DONT_CARE), _coords(coords), _info(), _parent(nullptr), _subtensor(nullptr)
{
ARM_COMPUTE_ERROR_ON(parent.tensor() == nullptr);
_parent = parent.tensor();
@@ -60,7 +59,7 @@ SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coord
instantiate_subtensor();
}
-SubTensor::SubTensor(ITensor *parent, TensorShape tensor_shape, Coordinates coords, Hint target)
+SubTensor::SubTensor(ITensor *parent, TensorShape tensor_shape, Coordinates coords, TargetHint target)
: _target(target), _coords(coords), _info(), _parent(parent), _subtensor(nullptr)
{
ARM_COMPUTE_ERROR_ON(parent == nullptr);
@@ -84,7 +83,7 @@ ITensor *SubTensor::tensor()
return _subtensor.get();
}
-Hint SubTensor::target() const
+TargetHint SubTensor::target() const
{
return _target;
}
@@ -93,13 +92,13 @@ void SubTensor::instantiate_subtensor()
{
switch(_target)
{
- case Hint::OPENCL:
+ case TargetHint::OPENCL:
_subtensor = initialise_subtensor<arm_compute::CLSubTensor, arm_compute::ICLTensor>(_parent, _info.tensor_shape(), _coords);
break;
- case Hint::NEON:
+ case TargetHint::NEON:
_subtensor = initialise_subtensor<arm_compute::SubTensor, arm_compute::ITensor>(_parent, _info.tensor_shape(), _coords);
break;
default:
- ARM_COMPUTE_ERROR("Invalid Hint");
+ ARM_COMPUTE_ERROR("Invalid TargetHint");
}
}
diff --git a/src/graph/Tensor.cpp b/src/graph/Tensor.cpp
index c534ae0296..31dd4e86ac 100644
--- a/src/graph/Tensor.cpp
+++ b/src/graph/Tensor.cpp
@@ -21,7 +21,6 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/core/Error.h"
@@ -53,7 +52,7 @@ void tensor_allocate(ITensor &tensor)
} // namespace
Tensor::Tensor(TensorInfo &&info)
- : _target(Hint::DONT_CARE), _info(info), _accessor(nullptr), _tensor(nullptr)
+ : _target(TargetHint::DONT_CARE), _info(info), _accessor(nullptr), _tensor(nullptr)
{
}
@@ -96,7 +95,7 @@ const TensorInfo &Tensor::info() const
return _info;
}
-ITensor *Tensor::set_target(Hint target)
+ITensor *Tensor::set_target(TargetHint target)
{
if(_tensor != nullptr)
{
@@ -106,14 +105,14 @@ ITensor *Tensor::set_target(Hint target)
{
switch(target)
{
- case Hint::OPENCL:
+ case TargetHint::OPENCL:
_tensor = initialise_tensor<arm_compute::CLTensor>(_info);
break;
- case Hint::NEON:
+ case TargetHint::NEON:
_tensor = initialise_tensor<arm_compute::Tensor>(_info);
break;
default:
- ARM_COMPUTE_ERROR("Invalid Hint");
+ ARM_COMPUTE_ERROR("Invalid TargetHint");
}
_target = target;
}
@@ -125,14 +124,14 @@ void Tensor::allocate()
ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor.get());
switch(_target)
{
- case Hint::OPENCL:
+ case TargetHint::OPENCL:
tensor_allocate<arm_compute::CLTensor>(*_tensor);
break;
- case Hint::NEON:
+ case TargetHint::NEON:
tensor_allocate<arm_compute::Tensor>(*_tensor);
break;
default:
- ARM_COMPUTE_ERROR("Invalid Hint");
+ ARM_COMPUTE_ERROR("Invalid TargetHint");
}
}
@@ -145,7 +144,7 @@ void Tensor::allocate_and_fill_if_needed()
}
}
-Hint Tensor::target() const
+TargetHint Tensor::target() const
{
return _target;
}
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
index b71e22c601..da2dac04e2 100644
--- a/src/graph/nodes/ActivationLayer.cpp
+++ b/src/graph/nodes/ActivationLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename ActivationType, typename TensorType, Hint hint>
+template <typename ActivationType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
{
auto activation = arm_compute::support::cpp14::make_unique<ActivationType>();
@@ -46,19 +46,19 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(activation);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
{
- return instantiate_function<arm_compute::CLActivationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, activation_info);
+ return instantiate_function<arm_compute::CLActivationLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, activation_info);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
{
- return instantiate_function<arm_compute::NEActivationLayer, arm_compute::Tensor, Hint::NEON>(input, output, activation_info);
+ return instantiate_function<arm_compute::NEActivationLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, activation_info);
}
} // namespace
@@ -67,27 +67,27 @@ ActivationLayer::ActivationLayer(const ActivationLayerInfo activation_info)
{
}
-std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output, _activation_info);
+ func = instantiate<TargetHint::OPENCL>(input, output, _activation_info);
}
else
{
- func = instantiate<Hint::NEON>(input, output, _activation_info);
+ func = instantiate<TargetHint::NEON>(input, output, _activation_info);
}
return func;
}
void ActivationLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLActivationLayer";
}
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index ce9f096719..a992095786 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -65,7 +65,7 @@ TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_sh
}
// Instantiate GEMM based convolution layer
-template <typename ConvolutionType, typename TensorType, Hint hint>
+template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
{
auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
@@ -79,7 +79,7 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
}
// Instantiate direct convolution layer
-template <typename ConvolutionType, typename TensorType, Hint hint>
+template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
{
auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
@@ -92,35 +92,37 @@ std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *inp
return std::move(conv);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
ConvolutionMethodHint conv_method);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info,
+ ConvolutionMethodHint conv_method)
{
if(conv_method == ConvolutionMethodHint::GEMM)
{
- return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+ return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
}
else
{
- return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info);
+ return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
}
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info,
+ ConvolutionMethodHint conv_method)
{
if(conv_method == ConvolutionMethodHint::GEMM)
{
- return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+ return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
}
else
{
- return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info);
+ return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
}
}
} // namespace
@@ -166,7 +168,7 @@ private:
std::vector<std::unique_ptr<IFunction>> _convolutions;
};
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
// Set weights and biases info
if(_weights.tensor() == nullptr)
@@ -181,17 +183,18 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint
}
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
+ const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
// Check if the weights and biases are loaded
bool weights_are_loaded = _weights.tensor() != nullptr;
bool biases_are_loaded = _weights.tensor() != nullptr;
// Set bias and weights target
- _weights.set_target(_hint);
- _biases.set_target(_hint);
+ _weights.set_target(_target_hint);
+ _biases.set_target(_target_hint);
// Calculate output shape
TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
@@ -200,14 +203,13 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint
arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());
// Create appropriate convolution function
- // TODO(geopin01): Fix convolution layer hints once the GraphContext has been added
if(_num_groups == 1)
{
- func = instantiate_convolution(ConvolutionMethodHint::GEMM);
+ func = instantiate_convolution(conv_method_hint);
}
else
{
- func = instantiate_grouped_convolution(ConvolutionMethodHint::GEMM);
+ func = instantiate_grouped_convolution(conv_method_hint);
}
// Fill weights
@@ -226,7 +228,7 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint
void ConvolutionLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLConvolutionLayer";
}
@@ -248,13 +250,13 @@ void ConvolutionLayer::print_info()
std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
{
std::unique_ptr<arm_compute::IFunction> func;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
}
else
{
- func = instantiate<Hint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
}
return func;
}
@@ -306,20 +308,20 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_co
Coordinates biases_coord(biases_split * i);
// Create sub-tensors for input, output, weights and bias
- auto hint_to_use = (_hint == Hint::OPENCL) ? Hint::OPENCL : Hint::NEON;
+ auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
_is[i] = SubTensor(_input, input_shape, input_coord, hint_to_use);
_os[i] = SubTensor(_output, output_shape, output_coord, hint_to_use);
_ws[i] = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
_bs[i] = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
// Instantiate convolution function
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
}
else
{
- func = instantiate<Hint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
}
// Add convolution function to the list of convolutions for the grouped convolution
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index fcc86be8fa..c317660b20 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -43,7 +43,7 @@ TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input
}
return TensorShape(output_neurons, batches);
}
-template <typename FullyConnectedType, typename TensorType, Hint hint>
+template <typename FullyConnectedType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
{
bool weights_are_loaded = weights.tensor() != nullptr;
@@ -52,8 +52,8 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Ten
auto conv = arm_compute::support::cpp14::make_unique<FullyConnectedType>();
conv->configure(
dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights.set_target(hint)),
- dynamic_cast<TensorType *>(biases.set_target(hint)),
+ dynamic_cast<TensorType *>(weights.set_target(target_hint)),
+ dynamic_cast<TensorType *>(biases.set_target(target_hint)),
dynamic_cast<TensorType *>(output));
if(!weights_are_loaded)
{
@@ -67,23 +67,23 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Ten
return std::move(conv);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
{
- return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::CLTensor, Hint::OPENCL>(input, weights, biases, output);
+ return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, weights, biases, output);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
{
- return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::Tensor, Hint::NEON>(input, weights, biases, output);
+ return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::Tensor, TargetHint::NEON>(input, weights, biases, output);
}
} // namespace
-std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
if(_weights.tensor() == nullptr)
{
@@ -111,17 +111,17 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hi
input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, _weights, _biases, output);
+ func = instantiate<TargetHint::OPENCL>(input, _weights, _biases, output);
}
else
{
- func = instantiate<Hint::NEON>(input, _weights, _biases, output);
+ func = instantiate<TargetHint::NEON>(input, _weights, _biases, output);
}
return func;
@@ -129,7 +129,7 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hi
void FullyConnectedLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLFullyConnectedLayer";
}
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
index 55ef9bf243..99d07dc8da 100644
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ b/src/graph/nodes/NormalizationLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename NormalizationType, typename TensorType, Hint hint>
+template <typename NormalizationType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
{
auto norm = arm_compute::support::cpp14::make_unique<NormalizationType>();
@@ -46,19 +46,19 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(norm);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
{
- return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, norm_info);
+ return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, norm_info);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
{
- return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::Tensor, Hint::NEON>(input, output, norm_info);
+ return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, norm_info);
}
} // namespace
@@ -67,20 +67,20 @@ NormalizationLayer::NormalizationLayer(const NormalizationLayerInfo norm_info)
{
}
-std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output, _norm_info);
+ func = instantiate<TargetHint::OPENCL>(input, output, _norm_info);
}
else
{
- func = instantiate<Hint::NEON>(input, output, _norm_info);
+ func = instantiate<TargetHint::NEON>(input, output, _norm_info);
}
return func;
@@ -88,7 +88,7 @@ std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Hin
void NormalizationLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLNormalizationLayer";
}
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
index f29332f65b..2a5e4cb3d8 100644
--- a/src/graph/nodes/PoolingLayer.cpp
+++ b/src/graph/nodes/PoolingLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename PoolingType, typename TensorType, Hint hint>
+template <typename PoolingType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
auto pool = arm_compute::support::cpp14::make_unique<PoolingType>();
@@ -46,19 +46,19 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(pool);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
- return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, pool_info);
+ return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, pool_info);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
- return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, Hint::NEON>(input, output, pool_info);
+ return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, pool_info);
}
} // namespace
@@ -67,20 +67,20 @@ PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
{
}
-std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output, _pool_info);
+ func = instantiate<TargetHint::OPENCL>(input, output, _pool_info);
}
else
{
- func = instantiate<Hint::NEON>(input, output, _pool_info);
+ func = instantiate<TargetHint::NEON>(input, output, _pool_info);
}
return func;
@@ -88,7 +88,7 @@ std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(Hint hint
void PoolingLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLPoolingLayer";
}
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
index fee88970fc..9e798ef7cc 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename SoftmaxType, typename TensorType, Hint hint>
+template <typename SoftmaxType, typename TensorType, TargetHint hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output)
{
auto softmax = arm_compute::support::cpp14::make_unique<SoftmaxType>();
@@ -45,36 +45,36 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(softmax);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output)
{
- return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output);
+ return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output)
{
- return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, Hint::NEON>(input, output);
+ return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, TargetHint::NEON>(input, output);
}
} // namespace
-std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output);
+ func = instantiate<TargetHint::OPENCL>(input, output);
}
else
{
- func = instantiate<Hint::NEON>(input, output);
+ func = instantiate<TargetHint::NEON>(input, output);
}
return func;
@@ -82,7 +82,7 @@ std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(Hint hint
void SoftmaxLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLSoftmaxLayer";
}