path: root/src/graph
author    Georgios Pinitas <georgios.pinitas@arm.com>    2017-09-26 12:32:57 +0100
committer Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:35:24 +0000
commit    6f669f039fb74675b858bc3703295609a6a3e122 (patch)
tree      704847bbebb2439f68309680bd4f4142b876c179 /src/graph
parent    1682430e220eb609752c650f85c0f96e375b6d6a (diff)
download  ComputeLibrary-6f669f039fb74675b858bc3703295609a6a3e122.tar.gz
COMPMID-417: Add grouping in convolution layer
-Adds grouping support in convolution layer
-Adds Normalization layer node in graph
-Adds AlexNet example
-Fixes FullyConnectedLayer output autoconfigure (works only for 1D batch space)

Change-Id: I5bd75f9a8b08cfd68f7c34745150266c2bc4221f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/89518
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/graph')
-rw-r--r--  src/graph/SubTensor.cpp                  105
-rw-r--r--  src/graph/nodes/ConvolutionLayer.cpp     263
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp   24
-rw-r--r--  src/graph/nodes/NormalizationLayer.cpp   105
4 files changed, 468 insertions, 29 deletions
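
For context on the changes below: grouped convolution partitions the input channels, the kernels, and the output channels evenly across groups and runs one convolution per group. A minimal standalone sketch of the split arithmetic (illustrative names, not part of the patch):

#include <cassert>

// Shapes follow the patch's indexing: input (W, H, C),
// weights (Kw, Kh, C / groups, Kn), output (W', H', Kn).
struct GroupSplit
{
    int input_channels_per_group;
    int kernels_per_group;
};

GroupSplit split_for_groups(int input_channels, int num_kernels, int num_groups)
{
    // Mirrors the ARM_COMPUTE_ERROR_ON_MSG checks in instantiate_grouped_convolution()
    assert(input_channels % num_groups == 0 && num_kernels % num_groups == 0);
    return { input_channels / num_groups, num_kernels / num_groups };
}
// Group i then reads input channels [i * C/g, (i + 1) * C/g) and applies
// kernels [i * Kn/g, (i + 1) * Kn/g), as the SubTensor coordinates below do.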
diff --git a/src/graph/SubTensor.cpp b/src/graph/SubTensor.cpp
new file mode 100644
index 0000000000..a70f32927b
--- /dev/null
+++ b/src/graph/SubTensor.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/graph/SubTensor.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLSubTensor.h"
+#include "arm_compute/runtime/SubTensor.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+template <typename SubTensorType, typename ParentTensorType>
+std::unique_ptr<ITensor> initialise_subtensor(ITensor *parent, TensorShape shape, Coordinates coords)
+{
+ auto ptensor = dynamic_cast<ParentTensorType *>(parent);
+ auto subtensor = arm_compute::support::cpp14::make_unique<SubTensorType>(ptensor, shape, coords);
+ return std::move(subtensor);
+}
+} // namespace
+
+SubTensor::SubTensor()
+ : _target(Hint::DONT_CARE), _coords(), _info(), _parent(nullptr), _subtensor(nullptr)
+{
+}
+
+SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coords)
+ : _target(Hint::DONT_CARE), _coords(coords), _info(), _parent(nullptr), _subtensor(nullptr)
+{
+ ARM_COMPUTE_ERROR_ON(parent.tensor() == nullptr);
+ _parent = parent.tensor();
+ _info = SubTensorInfo(parent.tensor()->info(), tensor_shape, coords);
+ _target = parent.target();
+
+ instantiate_subtensor();
+}
+
+SubTensor::SubTensor(ITensor *parent, TensorShape tensor_shape, Coordinates coords, Hint target)
+ : _target(target), _coords(coords), _info(), _parent(parent), _subtensor(nullptr)
+{
+ ARM_COMPUTE_ERROR_ON(parent == nullptr);
+ _info = SubTensorInfo(parent->info(), tensor_shape, coords);
+
+ instantiate_subtensor();
+}
+
+void SubTensor::set_info(SubTensorInfo &&info)
+{
+ _info = info;
+}
+
+const SubTensorInfo &SubTensor::info() const
+{
+ return _info;
+}
+
+ITensor *SubTensor::tensor()
+{
+ return _subtensor.get();
+}
+
+Hint SubTensor::target() const
+{
+ return _target;
+}
+
+void SubTensor::instantiate_subtensor()
+{
+ switch(_target)
+ {
+ case Hint::OPENCL:
+ _subtensor = initialise_subtensor<arm_compute::CLSubTensor, arm_compute::ICLTensor>(_parent, _info.tensor_shape(), _coords);
+ break;
+ case Hint::NEON:
+ _subtensor = initialise_subtensor<arm_compute::SubTensor, arm_compute::ITensor>(_parent, _info.tensor_shape(), _coords);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Invalid Hint");
+ }
+}
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index b80bf93eff..ce9f096719 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -24,60 +24,155 @@
#include "arm_compute/graph/nodes/ConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
+#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "support/ToolchainSupport.h"
+#include "utils/GraphTypePrinter.h"
#include "utils/TypePrinter.h"
+#include <tuple>
+#include <vector>
+
using namespace arm_compute::graph;
namespace
{
-template <typename ConvolutionType, typename TensorType, Hint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+/** Calculates the output shape of the convolution layer
+ *
+ * @param[in] input_shape Input tensor shape
+ * @param[in] weights_shape Weights shape
+ * @param[in] conv_info Convolution information (padding, stride, etc.)
+ *
+ * @return The expected output tensor shape
+ */
+TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_shape, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
{
- bool weights_are_loaded = weights.tensor() != nullptr;
- bool biases_are_loaded = biases.tensor() != nullptr;
+ unsigned int output_width = 0;
+ unsigned int output_height = 0;
+
+ // Get output width and height
+ std::tie(output_width, output_height) = arm_compute::scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);
+ // Create output shape
+ TensorShape output_shape = input_shape;
+ output_shape.set(0, output_width);
+ output_shape.set(1, output_height);
+ output_shape.set(2, weights_shape[3]);
+
+ return output_shape;
+}
+
+// Instantiate GEMM-based convolution layer
+template <typename ConvolutionType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+{
auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
conv->configure(
dynamic_cast<TensorType *>(input),
- dynamic_cast<TensorType *>(weights.set_target(hint)),
- dynamic_cast<TensorType *>(biases.set_target(hint)),
+ dynamic_cast<TensorType *>(weights),
+ dynamic_cast<TensorType *>(biases),
dynamic_cast<TensorType *>(output),
conv_info, weights_info);
- if(!weights_are_loaded)
- {
- weights.allocate_and_fill_if_needed();
- }
- if(!biases_are_loaded)
- {
- biases.allocate_and_fill_if_needed();
- }
+ return std::move(conv);
+}
+// Instantiate direct convolution layer
+template <typename ConvolutionType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
+{
+ auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
+ conv->configure(
+ dynamic_cast<TensorType *>(input),
+ dynamic_cast<TensorType *>(weights),
+ dynamic_cast<TensorType *>(biases),
+ dynamic_cast<TensorType *>(output),
+ conv_info);
return std::move(conv);
}
template <Hint hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info);
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+ ConvolutionMethodHint conv_method);
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+ ConvolutionMethodHint conv_method)
{
- return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::CLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+ if(conv_method == ConvolutionMethodHint::GEMM)
+ {
+ return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+ }
+ else
+ {
+ return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info);
+ }
}
template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+ ConvolutionMethodHint conv_method)
{
- return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::Tensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+ if(conv_method == ConvolutionMethodHint::GEMM)
+ {
+ return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+ }
+ else
+ {
+ return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info);
+ }
}
} // namespace
+/** Grouped Convolution function */
+class GroupedConvolutionFunction final : public arm_compute::IFunction
+{
+public:
+ /** Default Constructor */
+ GroupedConvolutionFunction()
+ : _convolutions()
+ {
+ }
+ /** Default Destructor */
+ ~GroupedConvolutionFunction() final = default;
+ /** Prevent instances from being copy constructed */
+ GroupedConvolutionFunction(const GroupedConvolutionFunction &) = delete;
+ /** Prevent instances from being copy assigned */
+ GroupedConvolutionFunction &operator=(const GroupedConvolutionFunction &) = delete;
+ /** Allow instances to be move constructed */
+ GroupedConvolutionFunction(GroupedConvolutionFunction &&) noexcept = default;
+ /** Allow instances to be move assigned */
+ GroupedConvolutionFunction &operator=(GroupedConvolutionFunction &&) noexcept = default;
+ /** Adds a convolution
+ *
+ * @param convolution Convolution function to add
+ */
+ void add_convolution_function(std::unique_ptr<IFunction> convolution)
+ {
+ _convolutions.emplace_back(std::move(convolution));
+ }
+
+ // Inherited methods overridden:
+ void run() override
+ {
+ for(auto &c : _convolutions)
+ {
+ c->run();
+ }
+ }
+
+private:
+ std::vector<std::unique_ptr<IFunction>> _convolutions;
+};
+
std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
{
+ // Set weights and biases info
if(_weights.tensor() == nullptr)
{
- _weights.set_info(TensorInfo(TensorShape(_conv_width, _conv_height, input->info()->dimension(2), _ofm), input->info()->num_channels(), input->info()->data_type(),
+ _weights.set_info(TensorInfo(TensorShape(_conv_width, _conv_height, input->info()->dimension(2) / _num_groups, _ofm),
+ input->info()->num_channels(), input->info()->data_type(),
input->info()->fixed_point_position()));
}
if(_biases.tensor() == nullptr)
@@ -90,13 +185,40 @@ std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint
_input = input;
_output = output;
- if(_hint == Hint::OPENCL)
+ // Check if the weights and biases are loaded
+ bool weights_are_loaded = _weights.tensor() != nullptr;
+ bool biases_are_loaded = _biases.tensor() != nullptr;
+
+ // Set bias and weights target
+ _weights.set_target(_hint);
+ _biases.set_target(_hint);
+
+ // Calculate output shape
+ TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
+
+ // Output auto-initialization if not yet initialized
+ arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());
+
+ // Create appropriate convolution function
+ // TODO(geopin01): Fix convolution layer hints once the GraphContext has been added
+ if(_num_groups == 1)
{
- func = instantiate<Hint::OPENCL>(input, _weights, _biases, output, _conv_info, _weights_info);
+ func = instantiate_convolution(ConvolutionMethodHint::GEMM);
}
else
{
- func = instantiate<Hint::NEON>(input, _weights, _biases, output, _conv_info, _weights_info);
+ func = instantiate_grouped_convolution(ConvolutionMethodHint::GEMM);
+ }
+
+ // Fill weights
+ if(!weights_are_loaded)
+ {
+ _weights.allocate_and_fill_if_needed();
+ }
+ // Fill biases
+ if(!biases_are_loaded)
+ {
+ _biases.allocate_and_fill_if_needed();
}
return func;
@@ -112,6 +234,97 @@ void ConvolutionLayer::print_info()
{
std::cout << "Instantiating NEConvolutionLayer";
}
- std::cout << " Type: " << _input->info()->data_type() << " Input Shape: " << _input->info()->tensor_shape() << " Weights shape: " << _weights.info().tensor_shape() << " Biases Shape: " <<
- _biases.info().tensor_shape() << " Output Shape: " << _output->info()->tensor_shape() << " PadStrideInfo: " << _conv_info << "WeightsInfo: " << _weights_info << std::endl;
+ std::cout << " Data Type: " << _input->info()->data_type()
+ << " Input Shape: " << _input->info()->tensor_shape()
+ << " Weights shape: " << _weights.info().tensor_shape()
+ << " Biases Shape: " << _biases.info().tensor_shape()
+ << " Output Shape: " << _output->info()->tensor_shape()
+ << " PadStrideInfo: " << _conv_info
+ << " Groups: " << _num_groups
+ << " WeightsInfo: " << _weights_info
+ << std::endl;
+}
+
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
+{
+ std::unique_ptr<arm_compute::IFunction> func;
+ if(_hint == Hint::OPENCL)
+ {
+ func = instantiate<Hint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ }
+ else
+ {
+ func = instantiate<Hint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+ }
+ return func;
+}
+
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ConvolutionMethodHint conv_method_hint)
+{
+ // Get tensor shapes
+ TensorShape input_shape = _input->info()->tensor_shape();
+ TensorShape output_shape = _output->info()->tensor_shape();
+ TensorShape weights_shape = _weights.info().tensor_shape();
+ TensorShape biases_shape = _biases.info().tensor_shape();
+
+ ARM_COMPUTE_ERROR_ON_MSG((input_shape.z() % _num_groups) != 0, "Input depth is not a multiple of the number of groups!");
+ ARM_COMPUTE_ERROR_ON_MSG((output_shape.z() % _num_groups) != 0, "Output depth is not a multiple of the number of groups!");
+ ARM_COMPUTE_ERROR_ON_MSG((weights_shape[3] % _num_groups) != 0, "Number of kernels is not a multiple of the number of groups!");
+ ARM_COMPUTE_ERROR_ON_MSG((biases_shape.x() % _num_groups) != 0, "Number of biases is not a multiple of the number of groups!");
+
+ // Create a grouped convolution function
+ auto grouped_conv = arm_compute::support::cpp14::make_unique<GroupedConvolutionFunction>();
+
+ // Create sub-tensor arrays
+ _is = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+ _os = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+ _ws = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+ _bs = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
+
+ // Calculate sub-tensor splits
+ const int input_split = input_shape.z() / _num_groups;
+ const int output_split = output_shape.z() / _num_groups;
+ const int weights_split = weights_shape[3] / _num_groups;
+ const int biases_split = biases_shape.x() / _num_groups;
+
+ // Calculate sub-tensor shapes
+ input_shape.set(2, input_split);
+ output_shape.set(2, output_split);
+ weights_shape.set(3, weights_split);
+ biases_shape.set(0, biases_split);
+
+ // Configure sub-tensors
+ for(int i = 0; i < static_cast<int>(_num_groups); ++i)
+ {
+ // Create convolution function
+ std::unique_ptr<arm_compute::IFunction> func;
+
+ // Calculate sub-tensor starting coordinates
+ Coordinates input_coord(0, 0, input_split * i);
+ Coordinates output_coord(0, 0, output_split * i);
+ Coordinates weights_coord(0, 0, 0, weights_split * i);
+ Coordinates biases_coord(biases_split * i);
+
+ // Create sub-tensors for input, output, weights and bias
+ auto hint_to_use = (_hint == Hint::OPENCL) ? Hint::OPENCL : Hint::NEON;
+ _is[i] = SubTensor(_input, input_shape, input_coord, hint_to_use);
+ _os[i] = SubTensor(_output, output_shape, output_coord, hint_to_use);
+ _ws[i] = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
+ _bs[i] = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
+
+ // Instantiate convolution function
+ if(_hint == Hint::OPENCL)
+ {
+ func = instantiate<Hint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+ }
+ else
+ {
+ func = instantiate<Hint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+ }
+
+ // Add convolution function to the list of convolutions for the grouped convolution
+ grouped_conv->add_convolution_function(std::move(func));
+ }
+
+ return std::move(grouped_conv);
}
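
A worked example of the shape logic above, assuming the usual parameters of AlexNet's second (grouped) convolution, which the new example presumably exercises: input (27, 27, 96), 256 kernels of 5x5, pad 2, stride 1, 2 groups.

// calculate_convolution_layer_output_shape:
//   output_width = (27 + 2 + 2 - 5) / 1 + 1 = 27   (same for output_height)
// Full shapes:      input (27, 27, 96), weights (5, 5, 48, 256), output (27, 27, 256)
//   (weights depth is 96 / 2 groups = 48, as set in instantiate_node above)
// Per-group shapes: input (27, 27, 48), weights (5, 5, 48, 128), output (27, 27, 128)
// Group 1 starts at input_coord (0, 0, 48), weights_coord (0, 0, 0, 128),
// output_coord (0, 0, 128) and biases_coord (128), matching the loop above.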
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 8d244cb515..fcc86be8fa 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -33,6 +33,16 @@ using namespace arm_compute::graph;
namespace
{
+TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input_shape, unsigned int output_neurons)
+{
+ // Note: Only 1D batch space is supported at the moment
+ unsigned int batches = input_shape[1];
+ if(input_shape.num_dimensions() > 2)
+ {
+ batches = input_shape[3];
+ }
+ return TensorShape(output_neurons, batches);
+}
template <typename FullyConnectedType, typename TensorType, Hint hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
{
@@ -95,8 +105,10 @@ std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hi
_biases.set_info(TensorInfo(TensorShape(_num_neurons), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position()));
}
- arm_compute::auto_init_if_empty(*output->info(), TensorShape(_num_neurons, input->info()->dimension(1)), input->info()->num_channels(), input->info()->data_type(),
- input->info()->fixed_point_position());
+ // Auto configure output
+ arm_compute::auto_init_if_empty(*output->info(),
+ calculate_fullyconnected_layer_output_shape(input->info()->tensor_shape(), _num_neurons),
+ input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
std::unique_ptr<arm_compute::IFunction> func;
_hint = hint;
@@ -125,6 +137,10 @@ void FullyConnectedLayer::print_info()
{
std::cout << "Instantiating NEFullyConnectedLayer";
}
- std::cout << " Type: " << _input->info()->data_type() << " Input Shape: " << _input->info()->tensor_shape() << " Weights shape: " << _weights.info().tensor_shape() << " Biases Shape: " <<
- _biases.info().tensor_shape() << " Output Shape: " << _output->info()->tensor_shape() << std::endl;
+ std::cout << " Type: " << _input->info()->data_type()
+ << " Input Shape: " << _input->info()->tensor_shape()
+ << " Weights shape: " << _weights.info().tensor_shape()
+ << " Biases Shape: " << _biases.info().tensor_shape()
+ << " Output Shape: " << _output->info()->tensor_shape()
+ << std::endl;
}
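
The batch rule in the new calculate_fullyconnected_layer_output_shape() above, restated as a compact sketch (batch_dimension is a hypothetical helper, not part of the patch):

#include "arm_compute/core/TensorShape.h"

// Only a 1D batch space is supported: batches sit at index 1 for 2D inputs
// (features, batches) and at index 3 for 4D inputs (W, H, C, batches).
unsigned int batch_dimension(const arm_compute::TensorShape &input_shape)
{
    return (input_shape.num_dimensions() > 2) ? input_shape[3] : input_shape[1];
}
// e.g. an input of (13, 13, 256, 8) with 4096 neurons auto-configures the
// output shape to (4096, 8).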
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
new file mode 100644
index 0000000000..55ef9bf243
--- /dev/null
+++ b/src/graph/nodes/NormalizationLayer.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/NormalizationLayer.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "support/ToolchainSupport.h"
+#include "utils/TypePrinter.h"
+
+using namespace arm_compute::graph;
+
+namespace
+{
+template <typename NormalizationType, typename TensorType, Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+{
+ auto norm = arm_compute::support::cpp14::make_unique<NormalizationType>();
+ norm->configure(
+ dynamic_cast<TensorType *>(input),
+ dynamic_cast<TensorType *>(output),
+ norm_info);
+
+ return std::move(norm);
+}
+
+template <Hint hint>
+std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info);
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+{
+ return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, norm_info);
+}
+
+template <>
+std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+{
+ return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::Tensor, Hint::NEON>(input, output, norm_info);
+}
+} // namespace
+
+NormalizationLayer::NormalizationLayer(const NormalizationLayerInfo norm_info)
+ : _norm_info(norm_info)
+{
+}
+
+std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+{
+ std::unique_ptr<arm_compute::IFunction> func;
+ _hint = hint;
+ _input = input;
+ _output = output;
+
+ if(_hint == Hint::OPENCL)
+ {
+ func = instantiate<Hint::OPENCL>(input, output, _norm_info);
+ }
+ else
+ {
+ func = instantiate<Hint::NEON>(input, output, _norm_info);
+ }
+
+ return func;
+}
+
+void NormalizationLayer::print_info()
+{
+ if(_hint == Hint::OPENCL)
+ {
+ std::cout << "Instantiating CLNormalizationLayer";
+ }
+ else
+ {
+ std::cout << "Instantiating NENormalizationLayer";
+ }
+
+ std::cout << " Data Type: " << _input->info()->data_type()
+ << " Input shape: " << _input->info()->tensor_shape()
+ << " Output shape: " << _output->info()->tensor_shape()
+ << " Normalization info: " << _norm_info
+ << std::endl;
+}
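
Lastly, a hypothetical usage sketch for the new node, assuming the streaming Graph API of this release; the NormalizationLayerInfo parameters are AlexNet-style LRN values, not taken from the patch:

#include "arm_compute/core/Types.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/nodes/NormalizationLayer.h"

void add_lrn_node(arm_compute::graph::Graph &graph)
{
    // Cross-map local response normalisation: size 5, alpha 1e-4, beta 0.75
    graph << arm_compute::graph::NormalizationLayer(
        arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
}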