author    Georgios Pinitas <georgios.pinitas@arm.com>  2017-10-04 16:53:58 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    ff421f2100e0e9e532f5fe78585300546af61690 (patch)
tree      9ba5a1bfe64b5b10f70c64a965f9c5ca14de9ce3 /src/graph/nodes
parent    925ca0f7402115da3bffb21c04fca0bc822c9b38 (diff)
download  ComputeLibrary-ff421f2100e0e9e532f5fe78585300546af61690.tar.gz
COMPMID-601: Add GraphContext
GraphContext holds all the information about the hints that need to be passed
to the nodes. As these hints might expand, it serves as a centralized class for
such information.

Change-Id: I0b5527630fb97cc5fa500db0bac8307ff2ea36e6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90300
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
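The usage visible in the diff below (ctx.hints().target_hint(),
ctx.hints().convolution_method_hint()) implies roughly the following shape for
the new classes. This is a minimal standalone sketch, not the library's actual
declarations: GraphHints, its constructor, and the DIRECT enumerator are
assumptions; only the accessor names and the OPENCL/NEON/GEMM values come from
the diff.

#include <iostream>

// Hypothetical stand-ins for the library's enums; the values used below all
// appear in the diff, except DIRECT, which is assumed.
enum class TargetHint { OPENCL, NEON };
enum class ConvolutionMethodHint { GEMM, DIRECT };

// Hints grouped in one object, so new hint kinds can be added without
// touching every node's signature.
class GraphHints
{
public:
    GraphHints(TargetHint target = TargetHint::NEON,
               ConvolutionMethodHint conv = ConvolutionMethodHint::GEMM)
        : _target_hint(target), _convolution_method_hint(conv)
    {
    }
    TargetHint target_hint() const
    {
        return _target_hint;
    }
    ConvolutionMethodHint convolution_method_hint() const
    {
        return _convolution_method_hint;
    }

private:
    TargetHint            _target_hint;
    ConvolutionMethodHint _convolution_method_hint;
};

// The context owns the hints; nodes query ctx.hints() as in the diff.
class GraphContext
{
public:
    GraphHints &hints()
    {
        return _hints;
    }

private:
    GraphHints _hints{};
};

int main()
{
    GraphContext ctx;
    ctx.hints() = GraphHints(TargetHint::OPENCL);
    std::cout << ((ctx.hints().target_hint() == TargetHint::OPENCL) ? "OPENCL" : "NEON") << "\n";
    return 0;
}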
Diffstat (limited to 'src/graph/nodes')
-rw-r--r--  src/graph/nodes/ActivationLayer.cpp      28
-rw-r--r--  src/graph/nodes/ConvolutionLayer.cpp     58
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp  32
-rw-r--r--  src/graph/nodes/NormalizationLayer.cpp   28
-rw-r--r--  src/graph/nodes/PoolingLayer.cpp         28
-rw-r--r--  src/graph/nodes/SoftmaxLayer.cpp         28
6 files changed, 102 insertions, 100 deletions
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
index b71e22c601..da2dac04e2 100644
--- a/src/graph/nodes/ActivationLayer.cpp
+++ b/src/graph/nodes/ActivationLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename ActivationType, typename TensorType, Hint hint>
+template <typename ActivationType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
{
auto activation = arm_compute::support::cpp14::make_unique<ActivationType>();
@@ -46,19 +46,19 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(activation);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
{
- return instantiate_function<arm_compute::CLActivationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, activation_info);
+ return instantiate_function<arm_compute::CLActivationLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, activation_info);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
{
- return instantiate_function<arm_compute::NEActivationLayer, arm_compute::Tensor, Hint::NEON>(input, output, activation_info);
+ return instantiate_function<arm_compute::NEActivationLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, activation_info);
}
} // namespace
@@ -67,27 +67,27 @@ ActivationLayer::ActivationLayer(const ActivationLayerInfo activation_info)
{
}
-std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output, _activation_info);
+ func = instantiate<TargetHint::OPENCL>(input, output, _activation_info);
}
else
{
- func = instantiate<Hint::NEON>(input, output, _activation_info);
+ func = instantiate<TargetHint::NEON>(input, output, _activation_info);
}
return func;
}
void ActivationLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLActivationLayer";
}
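The ActivationLayer changes above keep the backend-dispatch idiom intact and
only rename the hint type: a function template is declared once, fully
specialized per TargetHint, and a runtime branch picks the specialization. A
self-contained sketch of that idiom, with toy types standing in for the
arm_compute classes:

#include <iostream>
#include <memory>

enum class TargetHint { OPENCL, NEON };

// Toy stand-ins for arm_compute::IFunction and its backend implementations.
struct IFunction
{
    virtual ~IFunction() = default;
    virtual void run() = 0;
};
struct CLActivation : IFunction
{
    void run() override { std::cout << "OpenCL activation\n"; }
};
struct NEActivation : IFunction
{
    void run() override { std::cout << "NEON activation\n"; }
};

// Declared once, then fully specialized per backend, as in the diff.
template <TargetHint target_hint>
std::unique_ptr<IFunction> instantiate();

template <>
std::unique_ptr<IFunction> instantiate<TargetHint::OPENCL>()
{
    return std::unique_ptr<IFunction>(new CLActivation());
}

template <>
std::unique_ptr<IFunction> instantiate<TargetHint::NEON>()
{
    return std::unique_ptr<IFunction>(new NEActivation());
}

int main()
{
    // The runtime hint (now read from GraphContext) selects which
    // compile-time specialization runs.
    TargetHint hint = TargetHint::NEON;
    auto func = (hint == TargetHint::OPENCL) ? instantiate<TargetHint::OPENCL>()
                                             : instantiate<TargetHint::NEON>();
    func->run();
    return 0;
}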
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index ce9f096719..a992095786 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -65,7 +65,7 @@ TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_sh
}
// Instantiate GEMM based convolution layer
-template <typename ConvolutionType, typename TensorType, Hint hint>
+template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
{
auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
@@ -79,7 +79,7 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
}
// Instantiate direct convolution layer
-template <typename ConvolutionType, typename TensorType, Hint hint>
+template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
{
auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
@@ -92,35 +92,37 @@ std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *inp
return std::move(conv);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
ConvolutionMethodHint conv_method);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info,
+ ConvolutionMethodHint conv_method)
{
if(conv_method == ConvolutionMethodHint::GEMM)
{
- return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+ return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
}
else
{
- return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info);
+ return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
}
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- ConvolutionMethodHint conv_method)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info,
+ ConvolutionMethodHint conv_method)
{
if(conv_method == ConvolutionMethodHint::GEMM)
{
- return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+ return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
}
else
{
- return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info);
+ return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
}
}
} // namespace
@@ -166,7 +168,7 @@ private:
std::vector<std::unique_ptr<IFunction>> _convolutions;
};
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
// Set weights and biases info
if(_weights.tensor() == nullptr)
@@ -181,17 +183,18 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint
}
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
+ const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
// Check if the weights and biases are loaded
bool weights_are_loaded = _weights.tensor() != nullptr;
bool biases_are_loaded = _biases.tensor() != nullptr;
// Set bias and weights target
- _weights.set_target(_hint);
- _biases.set_target(_hint);
+ _weights.set_target(_target_hint);
+ _biases.set_target(_target_hint);
// Calculate output shape
TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
@@ -200,14 +203,13 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint
arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());
// Create appropriate convolution function
- // TODO(geopin01): Fix convolution layer hints once the GraphContext has been added
if(_num_groups == 1)
{
- func = instantiate_convolution(ConvolutionMethodHint::GEMM);
+ func = instantiate_convolution(conv_method_hint);
}
else
{
- func = instantiate_grouped_convolution(ConvolutionMethodHint::GEMM);
+ func = instantiate_grouped_convolution(conv_method_hint);
}
// Fill weights
@@ -226,7 +228,7 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint
void ConvolutionLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLConvolutionLayer";
}
@@ -248,13 +250,13 @@ void ConvolutionLayer::print_info()
std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
{
std::unique_ptr<arm_compute::IFunction> func;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
}
else
{
- func = instantiate<Hint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
}
return func;
}
@@ -306,20 +308,20 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_co
Coordinates biases_coord(biases_split * i);
// Create sub-tensors for input, output, weights and bias
- auto hint_to_use = (_hint == Hint::OPENCL) ? Hint::OPENCL : Hint::NEON;
+ auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
_is[i] = SubTensor(_input, input_shape, input_coord, hint_to_use);
_os[i] = SubTensor(_output, output_shape, output_coord, hint_to_use);
_ws[i] = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
_bs[i] = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
// Instantiate convolution function
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
}
else
{
- func = instantiate<Hint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+ func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
}
// Add convolution function to the list of convolutions for the grouped convolution
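The removed TODO above is the point of this file's change: instead of
hard-coding ConvolutionMethodHint::GEMM, the node now forwards
ctx.hints().convolution_method_hint() into instantiate_convolution. A sketch
of the resulting selection, with toy types in place of the real convolution
functions (the DIRECT enumerator name is an assumption; the diff only shows
the GEMM branch and an unnamed else):

#include <iostream>
#include <memory>

enum class ConvolutionMethodHint { GEMM, DIRECT }; // DIRECT name assumed

struct IFunction
{
    virtual ~IFunction() = default;
};
struct GEMMConvolution : IFunction
{
};
struct DirectConvolution : IFunction
{
};

std::unique_ptr<IFunction> instantiate_convolution(ConvolutionMethodHint conv_method_hint)
{
    // Mirrors the diff: GEMM-based convolution when the hint says GEMM,
    // direct convolution otherwise.
    if(conv_method_hint == ConvolutionMethodHint::GEMM)
    {
        return std::unique_ptr<IFunction>(new GEMMConvolution());
    }
    return std::unique_ptr<IFunction>(new DirectConvolution());
}

int main()
{
    auto func = instantiate_convolution(ConvolutionMethodHint::GEMM);
    std::cout << (func ? "created\n" : "failed\n");
    return 0;
}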
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index fcc86be8fa..c317660b20 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -43,7 +43,7 @@ TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input
}
return TensorShape(output_neurons, batches);
}
-template <typename FullyConnectedType, typename TensorType, Hint hint>
+template <typename FullyConnectedType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
{
bool weights_are_loaded = weights.tensor() != nullptr;
@@ -52,8 +52,8 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Ten
auto conv = arm_compute::support::cpp14::make_unique<FullyConnectedType>();
conv->configure(
dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights.set_target(hint)),
- dynamic_cast<TensorType *>(biases.set_target(hint)),
+ dynamic_cast<TensorType *>(weights.set_target(target_hint)),
+ dynamic_cast<TensorType *>(biases.set_target(target_hint)),
dynamic_cast<TensorType *>(output));
if(!weights_are_loaded)
{
@@ -67,23 +67,23 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Ten
return std::move(conv);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
{
- return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::CLTensor, Hint::OPENCL>(input, weights, biases, output);
+ return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, weights, biases, output);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
{
- return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::Tensor, Hint::NEON>(input, weights, biases, output);
+ return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::Tensor, TargetHint::NEON>(input, weights, biases, output);
}
} // namespace
-std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
if(_weights.tensor() == nullptr)
{
@@ -111,17 +111,17 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hi
input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, _weights, _biases, output);
+ func = instantiate<TargetHint::OPENCL>(input, _weights, _biases, output);
}
else
{
- func = instantiate<Hint::NEON>(input, _weights, _biases, output);
+ func = instantiate<TargetHint::NEON>(input, _weights, _biases, output);
}
return func;
@@ -129,7 +129,7 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hi
void FullyConnectedLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLFullyConnectedLayer";
}
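FullyConnectedLayer keeps its deferred-fill ordering through the rename:
weights and biases are bound to a target, configure() fixes their final
shapes, and only tensors that were not already loaded get allocated and filled
afterwards. A toy sketch of that ordering; Tensor, allocate() and fill() are
stand-ins, since the fill call itself is outside the hunk shown above:

#include <iostream>

// Stand-in tensor with the two states the node cares about.
struct Tensor
{
    bool loaded = false;
    void allocate() { std::cout << "allocate\n"; }
    void fill()     { loaded = true; std::cout << "fill\n"; }
};

int main()
{
    Tensor weights;
    bool weights_are_loaded = weights.loaded; // checked before configure()

    // configure(...) would run here, fixing the tensor's shape and layout.

    if(!weights_are_loaded)
    {
        // Allocation happens only after configure(), as in the diff, so the
        // backing memory matches what the configured function expects.
        weights.allocate();
        weights.fill();
    }
    return 0;
}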
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
index 55ef9bf243..99d07dc8da 100644
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ b/src/graph/nodes/NormalizationLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename NormalizationType, typename TensorType, Hint hint>
+template <typename NormalizationType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
{
auto norm = arm_compute::support::cpp14::make_unique<NormalizationType>();
@@ -46,19 +46,19 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(norm);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
{
- return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, norm_info);
+ return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, norm_info);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
{
- return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::Tensor, Hint::NEON>(input, output, norm_info);
+ return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, norm_info);
}
} // namespace
@@ -67,20 +67,20 @@ NormalizationLayer::NormalizationLayer(const NormalizationLayerInfo norm_info)
{
}
-std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output, _norm_info);
+ func = instantiate<TargetHint::OPENCL>(input, output, _norm_info);
}
else
{
- func = instantiate<Hint::NEON>(input, output, _norm_info);
+ func = instantiate<TargetHint::NEON>(input, output, _norm_info);
}
return func;
@@ -88,7 +88,7 @@ std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Hin
void NormalizationLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLNormalizationLayer";
}
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
index f29332f65b..2a5e4cb3d8 100644
--- a/src/graph/nodes/PoolingLayer.cpp
+++ b/src/graph/nodes/PoolingLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename PoolingType, typename TensorType, Hint hint>
+template <typename PoolingType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
auto pool = arm_compute::support::cpp14::make_unique<PoolingType>();
@@ -46,19 +46,19 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(pool);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
- return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, pool_info);
+ return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, pool_info);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
- return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, Hint::NEON>(input, output, pool_info);
+ return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, pool_info);
}
} // namespace
@@ -67,20 +67,20 @@ PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
{
}
-std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output, _pool_info);
+ func = instantiate<TargetHint::OPENCL>(input, output, _pool_info);
}
else
{
- func = instantiate<Hint::NEON>(input, output, _pool_info);
+ func = instantiate<TargetHint::NEON>(input, output, _pool_info);
}
return func;
@@ -88,7 +88,7 @@ std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(Hint hint
void PoolingLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLPoolingLayer";
}
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
index fee88970fc..9e798ef7cc 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -34,7 +34,7 @@ using namespace arm_compute::graph;
namespace
{
-template <typename SoftmaxType, typename TensorType, Hint hint>
+template <typename SoftmaxType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output)
{
auto softmax = arm_compute::support::cpp14::make_unique<SoftmaxType>();
@@ -45,36 +45,36 @@ std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITe
return std::move(softmax);
}
-template <Hint hint>
+template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output)
{
- return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output);
+ return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output);
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output)
{
- return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, Hint::NEON>(input, output);
+ return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, TargetHint::NEON>(input, output);
}
} // namespace
-std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
std::unique_ptr<arm_compute::IFunction> func;
- _hint = hint;
- _input = input;
- _output = output;
+ _target_hint = ctx.hints().target_hint();
+ _input = input;
+ _output = output;
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
- func = instantiate<Hint::OPENCL>(input, output);
+ func = instantiate<TargetHint::OPENCL>(input, output);
}
else
{
- func = instantiate<Hint::NEON>(input, output);
+ func = instantiate<TargetHint::NEON>(input, output);
}
return func;
@@ -82,7 +82,7 @@ std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(Hint hint
void SoftmaxLayer::print_info()
{
- if(_hint == Hint::OPENCL)
+ if(_target_hint == TargetHint::OPENCL)
{
std::cout << "Instantiating CLSoftmaxLayer";
}
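Taken together, every node in this patch now implements the same new entry
point, instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output),
in place of instantiate_node(Hint, ...). A guess at the base-class shape this
implies; INode, ITensor and IFunction here are stand-ins, and only the
instantiate_node and print_info signatures are taken from the diff:

#include <memory>

// Stand-ins for the library interfaces.
struct ITensor
{
    virtual ~ITensor() = default;
};
struct IFunction
{
    virtual ~IFunction() = default;
};
class GraphContext; // holds GraphHints, as sketched earlier

// Hypothetical node base class: each node reads its hints from the context
// inside instantiate_node and caches them (e.g. _target_hint) for print_info.
class INode
{
public:
    virtual ~INode() = default;
    virtual std::unique_ptr<IFunction> instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output) = 0;
    virtual void print_info() = 0;
};

int main()
{
    return 0;
}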