author    Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>    2023-09-27 17:46:17 +0100
committer felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>    2023-09-28 12:08:05 +0000
commit    afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      03bc7d5a762099989b16a656fa8d397b490ed70e /src/graph/nodes
parent    bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download  ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
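The exclusion rules above could be applied with a small helper along the following lines. This is a minimal, hypothetical sketch and not part of this delivery: it assumes clang-format 14.0.6 is on PATH, that the revised .clang-format file sits at the repository root, and the file-suffix list is illustrative rather than the exact set used.

# Hypothetical sketch: apply clang-format 14.0.6 across the repository while
# honouring the exclusion list from the commit message above.
import subprocess
from pathlib import Path

EXCLUDED_DIRS = (
    "compute_kernel_writer/validation/",
    "tests/",
    "include/",
    "src/core/NEON/kernels/convolution/",
    "src/core/NEON/kernels/arm_gemm/",
    "src/core/NEON/kernels/arm_conv/",
    "data/",
)
# Strictly C/C++ sources only; .cl files and non-C/C++ files (Android.bp, Sconscript, ...) are skipped.
CPP_SUFFIXES = {".c", ".cc", ".cpp", ".h", ".hpp", ".inl"}

def format_repo(root: str = ".") -> None:
    root_path = Path(root).resolve()
    for path in sorted(root_path.rglob("*")):
        if not path.is_file() or path.suffix not in CPP_SUFFIXES:
            continue
        rel = path.relative_to(root_path).as_posix()
        if any(rel.startswith(prefix) for prefix in EXCLUDED_DIRS):
            continue  # directory is on the exclusion list
        # clang-format -i rewrites the file in place using the repository's .clang-format configuration
        subprocess.run(["clang-format", "-i", str(path)], check=True)

if __name__ == "__main__":
    format_repo()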
Diffstat (limited to 'src/graph/nodes')
-rw-r--r--  src/graph/nodes/ActivationLayerNode.cpp  4
-rw-r--r--  src/graph/nodes/ArgMinMaxLayerNode.cpp  17
-rw-r--r--  src/graph/nodes/BatchNormalizationLayerNode.cpp  4
-rw-r--r--  src/graph/nodes/BoundingBoxTransformLayerNode.cpp  8
-rw-r--r--  src/graph/nodes/ChannelShuffleLayerNode.cpp  7
-rw-r--r--  src/graph/nodes/ConcatenateLayerNode.cpp  22
-rw-r--r--  src/graph/nodes/ConstNode.cpp  5
-rw-r--r--  src/graph/nodes/ConvolutionLayerNode.cpp  17
-rw-r--r--  src/graph/nodes/DeconvolutionLayerNode.cpp  10
-rw-r--r--  src/graph/nodes/DepthToSpaceLayerNode.cpp  11
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayerNode.cpp  24
-rw-r--r--  src/graph/nodes/DequantizationLayerNode.cpp  4
-rw-r--r--  src/graph/nodes/DetectionOutputLayerNode.cpp  9
-rw-r--r--  src/graph/nodes/DetectionPostProcessLayerNode.cpp  11
-rw-r--r--  src/graph/nodes/DummyNode.cpp  7
-rw-r--r--  src/graph/nodes/EltwiseLayerNode.cpp  11
-rw-r--r--  src/graph/nodes/FlattenLayerNode.cpp  4
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp  29
-rw-r--r--  src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp  29
-rw-r--r--  src/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.cpp  40
-rw-r--r--  src/graph/nodes/GenerateProposalsLayerNode.cpp  14
-rw-r--r--  src/graph/nodes/InputNode.cpp  5
-rw-r--r--  src/graph/nodes/L2NormalizeLayerNode.cpp  13
-rw-r--r--  src/graph/nodes/NormalizationLayerNode.cpp  7
-rw-r--r--  src/graph/nodes/NormalizePlanarYUVLayerNode.cpp  2
-rw-r--r--  src/graph/nodes/PReluLayerNode.cpp  2
-rw-r--r--  src/graph/nodes/PadLayerNode.cpp  10
-rw-r--r--  src/graph/nodes/PermuteLayerNode.cpp  12
-rw-r--r--  src/graph/nodes/PoolingLayerNode.cpp  10
-rw-r--r--  src/graph/nodes/PrintLayerNode.cpp  8
-rw-r--r--  src/graph/nodes/PriorBoxLayerNode.cpp  7
-rw-r--r--  src/graph/nodes/QuantizationLayerNode.cpp  2
-rw-r--r--  src/graph/nodes/ROIAlignLayerNode.cpp  10
-rw-r--r--  src/graph/nodes/ReductionLayerNode.cpp  9
-rw-r--r--  src/graph/nodes/ReorgLayerNode.cpp  13
-rw-r--r--  src/graph/nodes/ReshapeLayer.cpp  10
-rw-r--r--  src/graph/nodes/ResizeLayerNode.cpp  4
-rw-r--r--  src/graph/nodes/SliceLayerNode.cpp  10
-rw-r--r--  src/graph/nodes/SoftmaxLayerNode.cpp  7
-rw-r--r--  src/graph/nodes/SplitLayerNode.cpp  26
-rw-r--r--  src/graph/nodes/StackLayerNode.cpp  18
-rw-r--r--  src/graph/nodes/StridedSliceLayerNode.cpp  2
42 files changed, 247 insertions, 227 deletions
diff --git a/src/graph/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
index cf65d83a5e..1773afcb16 100644
--- a/src/graph/nodes/ActivationLayerNode.cpp
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -44,7 +44,7 @@ ActivationLayerInfo ActivationLayerNode::activation_info() const
bool ActivationLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -63,7 +63,7 @@ TensorDescriptor ActivationLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(src == nullptr);
TensorDescriptor output_info = src->desc();
- if(!_out_quant_info.empty())
+ if (!_out_quant_info.empty())
{
output_info.quant_info = _out_quant_info;
}
diff --git a/src/graph/nodes/ArgMinMaxLayerNode.cpp b/src/graph/nodes/ArgMinMaxLayerNode.cpp
index 63163b9e2c..5adebc950a 100644
--- a/src/graph/nodes/ArgMinMaxLayerNode.cpp
+++ b/src/graph/nodes/ArgMinMaxLayerNode.cpp
@@ -23,16 +23,18 @@
*/
#include "arm_compute/graph/nodes/ArgMinMaxLayerNode.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-
namespace arm_compute
{
namespace graph
{
-ArgMinMaxLayerNode::ArgMinMaxLayerNode(ReductionOperation op, unsigned int axis, DataType out_data_type, QuantizationInfo out_quant_info)
+ArgMinMaxLayerNode::ArgMinMaxLayerNode(ReductionOperation op,
+ unsigned int axis,
+ DataType out_data_type,
+ QuantizationInfo out_quant_info)
: _op(op), _axis(axis), _out_data_type(out_data_type), _out_quant_info(std::move(out_quant_info))
{
_input_edges.resize(1, EmptyEdgeID);
@@ -56,7 +58,7 @@ DataType ArgMinMaxLayerNode::out_data_type() const
bool ArgMinMaxLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -75,17 +77,18 @@ TensorDescriptor ArgMinMaxLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(src == nullptr);
TensorDescriptor output_info = src->desc();
- if(!_out_quant_info.empty())
+ if (!_out_quant_info.empty())
{
output_info.quant_info = _out_quant_info;
}
- if(_out_data_type != DataType::UNKNOWN)
+ if (_out_data_type != DataType::UNKNOWN)
{
output_info.data_type = _out_data_type;
}
- TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(output_info.shape, _axis, false);
+ TensorShape output_shape =
+ arm_compute::misc::shape_calculator::compute_reduced_shape(output_info.shape, _axis, false);
output_info.set_shape(output_shape);
return output_info;
diff --git a/src/graph/nodes/BatchNormalizationLayerNode.cpp b/src/graph/nodes/BatchNormalizationLayerNode.cpp
index ceca0e2715..c317123e8d 100644
--- a/src/graph/nodes/BatchNormalizationLayerNode.cpp
+++ b/src/graph/nodes/BatchNormalizationLayerNode.cpp
@@ -55,7 +55,7 @@ void BatchNormalizationLayerNode::set_fused_activation(ActivationLayerInfo fused
bool BatchNormalizationLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -86,4 +86,4 @@ void BatchNormalizationLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/BoundingBoxTransformLayerNode.cpp b/src/graph/nodes/BoundingBoxTransformLayerNode.cpp
index f3f4f91075..8e52174639 100644
--- a/src/graph/nodes/BoundingBoxTransformLayerNode.cpp
+++ b/src/graph/nodes/BoundingBoxTransformLayerNode.cpp
@@ -23,17 +23,15 @@
*/
#include "arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h"
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
-#include "arm_compute/core/Helpers.h"
-
namespace arm_compute
{
namespace graph
{
-BoundingBoxTransformLayerNode::BoundingBoxTransformLayerNode(BoundingBoxTransformInfo &info)
- : _bbox_info(info)
+BoundingBoxTransformLayerNode::BoundingBoxTransformLayerNode(BoundingBoxTransformInfo &info) : _bbox_info(info)
{
_input_edges.resize(2, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -46,7 +44,7 @@ const BoundingBoxTransformInfo &BoundingBoxTransformLayerNode::info() const
bool BoundingBoxTransformLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/ChannelShuffleLayerNode.cpp b/src/graph/nodes/ChannelShuffleLayerNode.cpp
index 5102e4b6da..3cb9e23eca 100644
--- a/src/graph/nodes/ChannelShuffleLayerNode.cpp
+++ b/src/graph/nodes/ChannelShuffleLayerNode.cpp
@@ -30,8 +30,7 @@ namespace arm_compute
{
namespace graph
{
-ChannelShuffleLayerNode::ChannelShuffleLayerNode(unsigned int num_groups)
- : _num_groups(num_groups)
+ChannelShuffleLayerNode::ChannelShuffleLayerNode(unsigned int num_groups) : _num_groups(num_groups)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -44,7 +43,7 @@ unsigned int ChannelShuffleLayerNode::num_groups() const
bool ChannelShuffleLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -75,4 +74,4 @@ void ChannelShuffleLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/ConcatenateLayerNode.cpp b/src/graph/nodes/ConcatenateLayerNode.cpp
index 3f3c70f3bb..8e5393a5e4 100644
--- a/src/graph/nodes/ConcatenateLayerNode.cpp
+++ b/src/graph/nodes/ConcatenateLayerNode.cpp
@@ -24,17 +24,17 @@
#include "arm_compute/graph/nodes/ConcatenateLayerNode.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
#include "arm_compute/graph/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-
namespace arm_compute
{
namespace graph
{
-ConcatenateLayerNode::ConcatenateLayerNode(unsigned int total_nodes, descriptors::ConcatLayerDescriptor concat_descriptor)
+ConcatenateLayerNode::ConcatenateLayerNode(unsigned int total_nodes,
+ descriptors::ConcatLayerDescriptor concat_descriptor)
: _total_nodes(total_nodes), _concat_descriptor(std::move(concat_descriptor)), _is_enabled(true)
{
_input_edges.resize(_total_nodes, EmptyEdgeID);
@@ -73,7 +73,7 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect
// Extract shapes
std::vector<const TensorShape *> shapes;
shapes.reserve(input_descriptors.size());
- for(auto &input_descriptor : input_descriptors)
+ for (auto &input_descriptor : input_descriptors)
{
shapes.emplace_back(&input_descriptor.shape);
}
@@ -85,7 +85,7 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect
bool ConcatenateLayerNode::forward_descriptors()
{
- if(_outputs[0] != NullTensorID)
+ if (_outputs[0] != NullTensorID)
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -101,24 +101,22 @@ TensorDescriptor ConcatenateLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
// Check if all input tensors are set
- bool are_all_inputs_set = std::all_of(std::begin(_input_edges), std::end(_input_edges), [](const EdgeID & eid)
- {
- return eid != EmptyEdgeID;
- });
+ bool are_all_inputs_set = std::all_of(std::begin(_input_edges), std::end(_input_edges),
+ [](const EdgeID &eid) { return eid != EmptyEdgeID; });
TensorDescriptor output_info = {};
- if(are_all_inputs_set)
+ if (are_all_inputs_set)
{
std::vector<TensorDescriptor> inputs_descriptors;
- for(unsigned int i = 0; i < _input_edges.size(); ++i)
+ for (unsigned int i = 0; i < _input_edges.size(); ++i)
{
const Tensor *t = _graph->tensor(input_id(i));
ARM_COMPUTE_ERROR_ON(t == nullptr);
inputs_descriptors.push_back(t->desc());
}
output_info = compute_output_descriptor(inputs_descriptors, _concat_descriptor.axis);
- if(!_concat_descriptor.output_qinfo.empty())
+ if (!_concat_descriptor.output_qinfo.empty())
{
output_info.quant_info = _concat_descriptor.output_qinfo;
}
diff --git a/src/graph/nodes/ConstNode.cpp b/src/graph/nodes/ConstNode.cpp
index eb96d63888..6e8fbff71a 100644
--- a/src/graph/nodes/ConstNode.cpp
+++ b/src/graph/nodes/ConstNode.cpp
@@ -30,15 +30,14 @@ namespace arm_compute
{
namespace graph
{
-ConstNode::ConstNode(TensorDescriptor desc)
- : _desc(std::move(desc))
+ConstNode::ConstNode(TensorDescriptor desc) : _desc(std::move(desc))
{
_outputs.resize(1, NullTensorID);
}
bool ConstNode::forward_descriptors()
{
- if(output_id(0) != NullTensorID)
+ if (output_id(0) != NullTensorID)
{
Tensor *t = output(0);
ARM_COMPUTE_ERROR_ON(t == nullptr);
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index ee9dde91d5..f0263fc84a 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -37,7 +37,12 @@ ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info,
ConvolutionMethod method,
FastMathHint fast_math_hint,
QuantizationInfo out_quant_info)
- : _info(std::move(info)), _num_groups(num_groups), _method(method), _fast_math_hint(fast_math_hint), _out_quant_info(std::move(out_quant_info)), _fused_activation()
+ : _info(std::move(info)),
+ _num_groups(num_groups),
+ _method(method),
+ _fast_math_hint(fast_math_hint),
+ _out_quant_info(std::move(out_quant_info)),
+ _fused_activation()
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -100,20 +105,22 @@ TensorDescriptor ConvolutionLayerNode::compute_output_descriptor(const TensorDes
const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ std::tie(output_width, output_height) =
+ scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL),
+ weights_descriptor.shape[3]);
return output_descriptor;
}
bool ConvolutionLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -132,7 +139,7 @@ TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info);
- if(!_out_quant_info.empty())
+ if (!_out_quant_info.empty())
{
output_info.quant_info = _out_quant_info;
}
diff --git a/src/graph/nodes/DeconvolutionLayerNode.cpp b/src/graph/nodes/DeconvolutionLayerNode.cpp
index 3542d5ad10..2058ab21e5 100644
--- a/src/graph/nodes/DeconvolutionLayerNode.cpp
+++ b/src/graph/nodes/DeconvolutionLayerNode.cpp
@@ -56,20 +56,22 @@ TensorDescriptor DeconvolutionLayerNode::compute_output_descriptor(const TensorD
const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- std::tie(output_width, output_height) = deconvolution_output_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ std::tie(output_width, output_height) =
+ deconvolution_output_dimensions(input_width, input_height, kernel_width, kernel_height, info);
const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL),
+ weights_descriptor.shape[3]);
return output_descriptor;
}
bool DeconvolutionLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -89,7 +91,7 @@ TensorDescriptor DeconvolutionLayerNode::configure_output(size_t idx) const
TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), descriptor.info);
- if(!descriptor.out_quant_info.empty())
+ if (!descriptor.out_quant_info.empty())
{
output_info.set_quantization_info(descriptor.out_quant_info);
}
diff --git a/src/graph/nodes/DepthToSpaceLayerNode.cpp b/src/graph/nodes/DepthToSpaceLayerNode.cpp
index b70ac56a07..0b914a0e56 100644
--- a/src/graph/nodes/DepthToSpaceLayerNode.cpp
+++ b/src/graph/nodes/DepthToSpaceLayerNode.cpp
@@ -32,8 +32,7 @@ namespace arm_compute
{
namespace graph
{
-DepthToSpaceLayerNode::DepthToSpaceLayerNode(int block_shape)
- : _block_shape(block_shape)
+DepthToSpaceLayerNode::DepthToSpaceLayerNode(int block_shape) : _block_shape(block_shape)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -44,7 +43,8 @@ int DepthToSpaceLayerNode::block_shape() const
return _block_shape;
}
-TensorDescriptor DepthToSpaceLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor, int block_shape)
+TensorDescriptor DepthToSpaceLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ int block_shape)
{
using namespace arm_compute::helpers::tensor_transform;
@@ -53,14 +53,15 @@ TensorDescriptor DepthToSpaceLayerNode::compute_output_descriptor(const TensorDe
// Set descriptor shape
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape = misc::shape_calculator::compute_depth_to_space_shape(input_shape, data_layout, block_shape);
+ output_descriptor.shape =
+ misc::shape_calculator::compute_depth_to_space_shape(input_shape, data_layout, block_shape);
return output_descriptor;
}
bool DepthToSpaceLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index 7de20165cb..92d7266088 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -32,9 +32,15 @@ namespace arm_compute
{
namespace graph
{
-DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, int depth_multiplier, DepthwiseConvolutionMethod method,
- QuantizationInfo out_quant_info)
- : _info(std::move(info)), _depth_multiplier(depth_multiplier), _method(method), _out_quant_info(std::move(out_quant_info)), _fused_activation()
+DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info,
+ int depth_multiplier,
+ DepthwiseConvolutionMethod method,
+ QuantizationInfo out_quant_info)
+ : _info(std::move(info)),
+ _depth_multiplier(depth_multiplier),
+ _method(method),
+ _out_quant_info(std::move(out_quant_info)),
+ _fused_activation()
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -89,20 +95,22 @@ TensorDescriptor DepthwiseConvolutionLayerNode::compute_output_descriptor(const
const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ std::tie(output_width, output_height) =
+ scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), input_channels * depth_multiplier);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL),
+ input_channels * depth_multiplier);
return output_descriptor;
}
bool DepthwiseConvolutionLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -121,7 +129,7 @@ TensorDescriptor DepthwiseConvolutionLayerNode::configure_output(size_t idx) con
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info, _depth_multiplier);
- if(!_out_quant_info.empty())
+ if (!_out_quant_info.empty())
{
output_info.quant_info = _out_quant_info;
}
@@ -139,4 +147,4 @@ void DepthwiseConvolutionLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/DequantizationLayerNode.cpp b/src/graph/nodes/DequantizationLayerNode.cpp
index 14c4752f12..3ea000852a 100644
--- a/src/graph/nodes/DequantizationLayerNode.cpp
+++ b/src/graph/nodes/DequantizationLayerNode.cpp
@@ -40,7 +40,7 @@ DequantizationLayerNode::DequantizationLayerNode()
bool DequantizationLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -74,4 +74,4 @@ void DequantizationLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/DetectionOutputLayerNode.cpp b/src/graph/nodes/DetectionOutputLayerNode.cpp
index fc6f531ee0..65ddd2f5bc 100644
--- a/src/graph/nodes/DetectionOutputLayerNode.cpp
+++ b/src/graph/nodes/DetectionOutputLayerNode.cpp
@@ -32,8 +32,7 @@ namespace arm_compute
{
namespace graph
{
-DetectionOutputLayerNode::DetectionOutputLayerNode(DetectionOutputLayerInfo detection_info)
- : _info(detection_info)
+DetectionOutputLayerNode::DetectionOutputLayerNode(DetectionOutputLayerInfo detection_info) : _info(detection_info)
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -47,7 +46,8 @@ DetectionOutputLayerInfo DetectionOutputLayerNode::detection_output_info() const
TensorDescriptor DetectionOutputLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
const DetectionOutputLayerInfo &info)
{
- const unsigned int max_size = info.keep_top_k() * ((input_descriptor.shape.num_dimensions() > 1) ? input_descriptor.shape[1] : 1);
+ const unsigned int max_size =
+ info.keep_top_k() * ((input_descriptor.shape.num_dimensions() > 1) ? input_descriptor.shape[1] : 1);
TensorDescriptor output_descriptor = input_descriptor;
output_descriptor.shape.set(0, detection_size);
@@ -58,7 +58,8 @@ TensorDescriptor DetectionOutputLayerNode::compute_output_descriptor(const Tenso
bool DetectionOutputLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (input_id(2) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (input_id(2) != NullTensorID) &&
+ (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/DetectionPostProcessLayerNode.cpp b/src/graph/nodes/DetectionPostProcessLayerNode.cpp
index 2c5005af30..af3fc03d67 100644
--- a/src/graph/nodes/DetectionPostProcessLayerNode.cpp
+++ b/src/graph/nodes/DetectionPostProcessLayerNode.cpp
@@ -46,10 +46,11 @@ DetectionPostProcessLayerInfo DetectionPostProcessLayerNode::detection_post_proc
bool DetectionPostProcessLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (input_id(2) != NullTensorID) && (output_id(0) != NullTensorID) && (output_id(1) != NullTensorID)
- && (output_id(2) != NullTensorID) && (output_id(3) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (input_id(2) != NullTensorID) &&
+ (output_id(0) != NullTensorID) && (output_id(1) != NullTensorID) && (output_id(2) != NullTensorID) &&
+ (output_id(3) != NullTensorID))
{
- for(unsigned int i = 0; i < 4; ++i)
+ for (unsigned int i = 0; i < 4; ++i)
{
Tensor *dst = output(i);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -68,7 +69,7 @@ TensorDescriptor DetectionPostProcessLayerNode::configure_output(size_t idx) con
TensorDescriptor output_desc;
const unsigned int num_detected_box = _info.max_detections() * _info.max_classes_per_detection();
- switch(idx)
+ switch (idx)
{
case 0:
// Configure boxes output
@@ -101,4 +102,4 @@ void DetectionPostProcessLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/DummyNode.cpp b/src/graph/nodes/DummyNode.cpp
index 6fa9fbaf56..b5f37bd79b 100644
--- a/src/graph/nodes/DummyNode.cpp
+++ b/src/graph/nodes/DummyNode.cpp
@@ -32,8 +32,7 @@ namespace arm_compute
{
namespace graph
{
-DummyNode::DummyNode(TensorShape shape)
- : _shape(shape)
+DummyNode::DummyNode(TensorShape shape) : _shape(shape)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -41,7 +40,7 @@ DummyNode::DummyNode(TensorShape shape)
bool DummyNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -75,4 +74,4 @@ void DummyNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
index 4426e953ee..3f7a08e64d 100644
--- a/src/graph/nodes/EltwiseLayerNode.cpp
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -31,8 +31,7 @@ namespace arm_compute
{
namespace graph
{
-EltwiseLayerNode::EltwiseLayerNode(const descriptors::EltwiseLayerDescriptor &descriptor)
- : descriptor(descriptor)
+EltwiseLayerNode::EltwiseLayerNode(const descriptors::EltwiseLayerDescriptor &descriptor) : descriptor(descriptor)
{
_input_edges.resize(2, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -70,7 +69,7 @@ void EltwiseLayerNode::set_fused_activation(ActivationLayerInfo fused_activation
bool EltwiseLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -97,7 +96,7 @@ TensorDescriptor EltwiseLayerNode::configure_output(size_t idx) const
output_info.set_shape(out_shape);
- if(!descriptor.out_quant_info.empty())
+ if (!descriptor.out_quant_info.empty())
{
output_info.set_quantization_info(descriptor.out_quant_info);
}
@@ -134,7 +133,7 @@ void UnaryEltwiseLayerNode::set_fused_activation(ActivationLayerInfo fused_activ
bool UnaryEltwiseLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -153,7 +152,7 @@ TensorDescriptor UnaryEltwiseLayerNode::configure_output(size_t idx) const
auto output_info = src->desc();
- if(!descriptor.out_quant_info.empty())
+ if (!descriptor.out_quant_info.empty())
{
output_info.set_quantization_info(descriptor.out_quant_info);
}
diff --git a/src/graph/nodes/FlattenLayerNode.cpp b/src/graph/nodes/FlattenLayerNode.cpp
index 48519a1695..952df2f3ec 100644
--- a/src/graph/nodes/FlattenLayerNode.cpp
+++ b/src/graph/nodes/FlattenLayerNode.cpp
@@ -38,7 +38,7 @@ FlattenLayerNode::FlattenLayerNode()
bool FlattenLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -72,4 +72,4 @@ void FlattenLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 6278227878..1eed69ddaf 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -21,18 +21,23 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"
-
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"
namespace arm_compute
{
namespace graph
{
-FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs, QuantizationInfo out_quant_info, FullyConnectedLayerInfo fc_info, FastMathHint fast_math_hint)
- : _num_outputs(num_outputs), _out_quant_info(std::move(out_quant_info)), _info(fc_info), _fast_math_hint(fast_math_hint)
+FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs,
+ QuantizationInfo out_quant_info,
+ FullyConnectedLayerInfo fc_info,
+ FastMathHint fast_math_hint)
+ : _num_outputs(num_outputs),
+ _out_quant_info(std::move(out_quant_info)),
+ _info(fc_info),
+ _fast_math_hint(fast_math_hint)
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -60,11 +65,11 @@ TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const Tenso
unsigned int num_weights = 1;
unsigned int num_dimensions = input_descriptor.shape.num_dimensions();
// Ignore the batch dimension if there is one:
- if(num_dimensions == 2 || num_dimensions == 4)
+ if (num_dimensions == 2 || num_dimensions == 4)
{
num_dimensions--;
}
- for(unsigned int i = 0; i < num_dimensions; i++)
+ for (unsigned int i = 0; i < num_dimensions; i++)
{
num_weights *= input_descriptor.shape[i];
}
@@ -73,13 +78,13 @@ TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const Tenso
weights_descriptor.shape = TensorShape(num_weights, num_outputs);
// If weights are tranposed, use tranposed shape
- if(!fc_info.transpose_weights)
+ if (!fc_info.transpose_weights)
{
weights_descriptor.shape = TensorShape(num_outputs, num_weights);
}
// Set quantization info if present
- if(!weights_quant_info.empty())
+ if (!weights_quant_info.empty())
{
weights_descriptor.quant_info = weights_quant_info;
}
@@ -93,7 +98,7 @@ TensorDescriptor FullyConnectedLayerNode::compute_output_descriptor(const Tensor
{
// Note: Only 1D batch space is supported at the moment
unsigned int batches = input_descriptor.shape[1];
- if(input_descriptor.shape.num_dimensions() > 2)
+ if (input_descriptor.shape.num_dimensions() > 2)
{
batches = input_descriptor.shape[3];
}
@@ -103,7 +108,7 @@ TensorDescriptor FullyConnectedLayerNode::compute_output_descriptor(const Tensor
output_descriptor.shape = TensorShape(num_outputs, batches);
// Set quantization info if present
- if(!out_quant_info.empty())
+ if (!out_quant_info.empty())
{
output_descriptor.quant_info = out_quant_info;
}
@@ -118,7 +123,7 @@ FullyConnectedLayerInfo FullyConnectedLayerNode::info() const
bool FullyConnectedLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -147,4 +152,4 @@ void FullyConnectedLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp b/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
index de995ebee9..9d37e84acf 100644
--- a/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
+++ b/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
@@ -32,12 +32,18 @@ namespace arm_compute
{
namespace graph
{
-FusedConvolutionBatchNormalizationNode::FusedConvolutionBatchNormalizationNode(float epsilon, PadStrideInfo info,
- unsigned int num_groups,
- ConvolutionMethod method,
- FastMathHint fast_math_hint,
+FusedConvolutionBatchNormalizationNode::FusedConvolutionBatchNormalizationNode(float epsilon,
+ PadStrideInfo info,
+ unsigned int num_groups,
+ ConvolutionMethod method,
+ FastMathHint fast_math_hint,
ActivationLayerInfo fused_activation)
- : _epsilon(epsilon), _info(std::move(info)), _num_groups(num_groups), _method(method), _fast_math_hint(fast_math_hint), _fused_activation(fused_activation)
+ : _epsilon(epsilon),
+ _info(std::move(info)),
+ _num_groups(num_groups),
+ _method(method),
+ _fast_math_hint(fast_math_hint),
+ _fused_activation(fused_activation)
{
_input_edges.resize(7, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -88,9 +94,8 @@ void FusedConvolutionBatchNormalizationNode::set_fused_activation(ActivationLaye
_fused_activation = fused_activation;
}
-TensorDescriptor FusedConvolutionBatchNormalizationNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
- const TensorDescriptor &weights_descriptor,
- const PadStrideInfo &info)
+TensorDescriptor FusedConvolutionBatchNormalizationNode::compute_output_descriptor(
+ const TensorDescriptor &input_descriptor, const TensorDescriptor &weights_descriptor, const PadStrideInfo &info)
{
unsigned int output_width = 0;
unsigned int output_height = 0;
@@ -100,20 +105,22 @@ TensorDescriptor FusedConvolutionBatchNormalizationNode::compute_output_descript
const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ std::tie(output_width, output_height) =
+ scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL),
+ weights_descriptor.shape[3]);
return output_descriptor;
}
bool FusedConvolutionBatchNormalizationNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.cpp b/src/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.cpp
index c022450b9d..c51641d64c 100644
--- a/src/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.cpp
+++ b/src/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.cpp
@@ -32,18 +32,24 @@ namespace arm_compute
{
namespace graph
{
-FusedDepthwiseConvolutionBatchNormalizationNode::FusedDepthwiseConvolutionBatchNormalizationNode(float epsilon,
- PadStrideInfo info,
- unsigned int depth_multiplier,
- DepthwiseConvolutionMethod method,
- ActivationLayerInfo fused_activation)
- : _epsilon(epsilon), _info(std::move(info)), _depth_multiplier(depth_multiplier), _method(method), _fused_activation(fused_activation)
+FusedDepthwiseConvolutionBatchNormalizationNode::FusedDepthwiseConvolutionBatchNormalizationNode(
+ float epsilon,
+ PadStrideInfo info,
+ unsigned int depth_multiplier,
+ DepthwiseConvolutionMethod method,
+ ActivationLayerInfo fused_activation)
+ : _epsilon(epsilon),
+ _info(std::move(info)),
+ _depth_multiplier(depth_multiplier),
+ _method(method),
+ _fused_activation(fused_activation)
{
_input_edges.resize(7, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
}
-void FusedDepthwiseConvolutionBatchNormalizationNode::set_depthwise_convolution_method(DepthwiseConvolutionMethod method)
+void FusedDepthwiseConvolutionBatchNormalizationNode::set_depthwise_convolution_method(
+ DepthwiseConvolutionMethod method)
{
_method = method;
}
@@ -78,10 +84,11 @@ void FusedDepthwiseConvolutionBatchNormalizationNode::set_fused_activation(Activ
_fused_activation = fused_activation;
}
-TensorDescriptor FusedDepthwiseConvolutionBatchNormalizationNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
- const TensorDescriptor &weights_descriptor,
- const PadStrideInfo &info,
- int depth_multiplier)
+TensorDescriptor
+FusedDepthwiseConvolutionBatchNormalizationNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const TensorDescriptor &weights_descriptor,
+ const PadStrideInfo &info,
+ int depth_multiplier)
{
unsigned int output_width = 0;
unsigned int output_height = 0;
@@ -92,19 +99,22 @@ TensorDescriptor FusedDepthwiseConvolutionBatchNormalizationNode::compute_output
const unsigned int kernel_width = get_dimension_size(weights_descriptor, DataLayoutDimension::WIDTH);
const unsigned int kernel_height = get_dimension_size(weights_descriptor, DataLayoutDimension::HEIGHT);
- std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ std::tie(output_width, output_height) =
+ scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
TensorDescriptor output_descriptor = input_descriptor;
output_descriptor.shape.set(get_dimension_idx(output_descriptor.layout, DataLayoutDimension::WIDTH), output_width);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor.layout, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor.layout, DataLayoutDimension::CHANNEL), input_channels * depth_multiplier);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor.layout, DataLayoutDimension::HEIGHT),
+ output_height);
+ output_descriptor.shape.set(get_dimension_idx(output_descriptor.layout, DataLayoutDimension::CHANNEL),
+ input_channels * depth_multiplier);
return output_descriptor;
}
bool FusedDepthwiseConvolutionBatchNormalizationNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/GenerateProposalsLayerNode.cpp b/src/graph/nodes/GenerateProposalsLayerNode.cpp
index 9f36862818..1671a47a95 100644
--- a/src/graph/nodes/GenerateProposalsLayerNode.cpp
+++ b/src/graph/nodes/GenerateProposalsLayerNode.cpp
@@ -23,17 +23,15 @@
*/
#include "arm_compute/graph/nodes/GenerateProposalsLayerNode.h"
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
-#include "arm_compute/core/Helpers.h"
-
namespace arm_compute
{
namespace graph
{
-GenerateProposalsLayerNode::GenerateProposalsLayerNode(GenerateProposalsInfo &info)
- : _info(info)
+GenerateProposalsLayerNode::GenerateProposalsLayerNode(GenerateProposalsInfo &info) : _info(info)
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(3, NullTensorID);
@@ -46,10 +44,10 @@ const GenerateProposalsInfo &GenerateProposalsLayerNode::info() const
bool GenerateProposalsLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (input_id(2) != NullTensorID) && (output_id(0) != NullTensorID) && (output_id(1) != NullTensorID)
- && (output_id(2) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (input_id(2) != NullTensorID) &&
+ (output_id(0) != NullTensorID) && (output_id(1) != NullTensorID) && (output_id(2) != NullTensorID))
{
- for(unsigned int i = 0; i < 3; ++i)
+ for (unsigned int i = 0; i < 3; ++i)
{
Tensor *dst = output(i);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -68,7 +66,7 @@ TensorDescriptor GenerateProposalsLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(src == nullptr);
TensorDescriptor output_desc = src->desc();
- switch(idx)
+ switch (idx)
{
case 0:
// Configure proposals output
diff --git a/src/graph/nodes/InputNode.cpp b/src/graph/nodes/InputNode.cpp
index 072281f259..7408bc265d 100644
--- a/src/graph/nodes/InputNode.cpp
+++ b/src/graph/nodes/InputNode.cpp
@@ -30,15 +30,14 @@ namespace arm_compute
{
namespace graph
{
-InputNode::InputNode(TensorDescriptor desc)
- : _desc(std::move(desc))
+InputNode::InputNode(TensorDescriptor desc) : _desc(std::move(desc))
{
_outputs.resize(1, NullTensorID);
}
bool InputNode::forward_descriptors()
{
- if(output_id(0) != NullTensorID)
+ if (output_id(0) != NullTensorID)
{
Tensor *t = output(0);
ARM_COMPUTE_ERROR_ON(t == nullptr);
diff --git a/src/graph/nodes/L2NormalizeLayerNode.cpp b/src/graph/nodes/L2NormalizeLayerNode.cpp
index 0c35a335fa..1a57cf0199 100644
--- a/src/graph/nodes/L2NormalizeLayerNode.cpp
+++ b/src/graph/nodes/L2NormalizeLayerNode.cpp
@@ -30,18 +30,15 @@ namespace arm_compute
{
namespace graph
{
-L2NormalizeLayerNode::L2NormalizeLayerNode()
- : L2NormalizeLayerNode(0, 1e-12f)
+L2NormalizeLayerNode::L2NormalizeLayerNode() : L2NormalizeLayerNode(0, 1e-12f)
{
}
-L2NormalizeLayerNode::L2NormalizeLayerNode(int axis)
- : L2NormalizeLayerNode(axis, 1e-12f)
+L2NormalizeLayerNode::L2NormalizeLayerNode(int axis) : L2NormalizeLayerNode(axis, 1e-12f)
{
}
-L2NormalizeLayerNode::L2NormalizeLayerNode(int axis, float epsilon)
- : _axis(axis), _epsilon(epsilon)
+L2NormalizeLayerNode::L2NormalizeLayerNode(int axis, float epsilon) : _axis(axis), _epsilon(epsilon)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -49,7 +46,7 @@ L2NormalizeLayerNode::L2NormalizeLayerNode(int axis, float epsilon)
bool L2NormalizeLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -92,4 +89,4 @@ void L2NormalizeLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/NormalizationLayerNode.cpp b/src/graph/nodes/NormalizationLayerNode.cpp
index eaa1bcf924..b18bb7dd93 100644
--- a/src/graph/nodes/NormalizationLayerNode.cpp
+++ b/src/graph/nodes/NormalizationLayerNode.cpp
@@ -31,8 +31,7 @@ namespace arm_compute
{
namespace graph
{
-NormalizationLayerNode::NormalizationLayerNode(NormalizationLayerInfo norm_info)
- : _info(norm_info)
+NormalizationLayerNode::NormalizationLayerNode(NormalizationLayerInfo norm_info) : _info(norm_info)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -45,7 +44,7 @@ NormalizationLayerInfo NormalizationLayerNode::normalization_info() const
bool NormalizationLayerNode::forward_descriptors()
{
- if(input_id(0) != NullTensorID && (output_id(0) != NullTensorID))
+ if (input_id(0) != NullTensorID && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -76,4 +75,4 @@ void NormalizationLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/NormalizePlanarYUVLayerNode.cpp b/src/graph/nodes/NormalizePlanarYUVLayerNode.cpp
index 113d0a541f..cac96606ea 100644
--- a/src/graph/nodes/NormalizePlanarYUVLayerNode.cpp
+++ b/src/graph/nodes/NormalizePlanarYUVLayerNode.cpp
@@ -39,7 +39,7 @@ NormalizePlanarYUVLayerNode::NormalizePlanarYUVLayerNode()
bool NormalizePlanarYUVLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/PReluLayerNode.cpp b/src/graph/nodes/PReluLayerNode.cpp
index 378c18e3bb..2b50fe9234 100644
--- a/src/graph/nodes/PReluLayerNode.cpp
+++ b/src/graph/nodes/PReluLayerNode.cpp
@@ -38,7 +38,7 @@ PReluLayerNode::PReluLayerNode()
bool PReluLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/PadLayerNode.cpp b/src/graph/nodes/PadLayerNode.cpp
index 6424370d41..336e7de05a 100644
--- a/src/graph/nodes/PadLayerNode.cpp
+++ b/src/graph/nodes/PadLayerNode.cpp
@@ -23,17 +23,15 @@
*/
#include "arm_compute/graph/nodes/PadLayerNode.h"
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
-#include "arm_compute/core/Helpers.h"
-
namespace arm_compute
{
namespace graph
{
-PadLayerNode::PadLayerNode(const PaddingList &padding, PixelValue pad_value)
- : _padding(padding), _pad_value(pad_value)
+PadLayerNode::PadLayerNode(const PaddingList &padding, PixelValue pad_value) : _padding(padding), _pad_value(pad_value)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -51,7 +49,7 @@ PixelValue PadLayerNode::pad_value() const
bool PadLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -71,7 +69,7 @@ TensorDescriptor PadLayerNode::configure_output(size_t idx) const
TensorDescriptor output_desc = src->desc();
const TensorShape input_shape = src->desc().shape;
- for(size_t dim = 0; dim < _padding.size(); ++dim)
+ for (size_t dim = 0; dim < _padding.size(); ++dim)
{
output_desc.shape.set(dim, _padding[dim].first + input_shape[dim] + _padding[dim].second);
}
diff --git a/src/graph/nodes/PermuteLayerNode.cpp b/src/graph/nodes/PermuteLayerNode.cpp
index b311ee1301..db53722363 100644
--- a/src/graph/nodes/PermuteLayerNode.cpp
+++ b/src/graph/nodes/PermuteLayerNode.cpp
@@ -23,17 +23,15 @@
*/
#include "arm_compute/graph/nodes/PermuteLayerNode.h"
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
-#include "arm_compute/core/Helpers.h"
-
namespace arm_compute
{
namespace graph
{
-PermuteLayerNode::PermuteLayerNode(PermutationVector perm, DataLayout layout)
- : _perm(perm), _layout(layout)
+PermuteLayerNode::PermuteLayerNode(PermutationVector perm, DataLayout layout) : _perm(perm), _layout(layout)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -46,7 +44,7 @@ const PermutationVector &PermuteLayerNode::permutation_vector() const
bool PermuteLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -66,7 +64,7 @@ TensorDescriptor PermuteLayerNode::configure_output(size_t idx) const
TensorDescriptor output_desc = src->desc();
permute(output_desc.shape, _perm);
- if(_layout != DataLayout::UNKNOWN)
+ if (_layout != DataLayout::UNKNOWN)
{
output_desc.layout = _layout;
}
@@ -84,4 +82,4 @@ void PermuteLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/PoolingLayerNode.cpp b/src/graph/nodes/PoolingLayerNode.cpp
index 4ecf924a5e..ac954acbe3 100644
--- a/src/graph/nodes/PoolingLayerNode.cpp
+++ b/src/graph/nodes/PoolingLayerNode.cpp
@@ -32,8 +32,7 @@ namespace arm_compute
{
namespace graph
{
-PoolingLayerNode::PoolingLayerNode(PoolingLayerInfo pool_info)
- : _info(std::move(pool_info))
+PoolingLayerNode::PoolingLayerNode(PoolingLayerInfo pool_info) : _info(std::move(pool_info))
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -55,7 +54,8 @@ TensorDescriptor PoolingLayerNode::compute_output_descriptor(const TensorDescrip
const unsigned int pool_size_x = info.is_global_pooling ? input_width : info.pool_size.width;
const unsigned int pool_size_y = info.is_global_pooling ? input_height : info.pool_size.height;
- std::tie(pooled_width, pooled_height) = scaled_dimensions(input_width, input_height, pool_size_x, pool_size_y, info.pad_stride_info);
+ std::tie(pooled_width, pooled_height) =
+ scaled_dimensions(input_width, input_height, pool_size_x, pool_size_y, info.pad_stride_info);
const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
@@ -67,7 +67,7 @@ TensorDescriptor PoolingLayerNode::compute_output_descriptor(const TensorDescrip
bool PoolingLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -98,4 +98,4 @@ void PoolingLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/PrintLayerNode.cpp b/src/graph/nodes/PrintLayerNode.cpp
index da408d8c4d..82a340005b 100644
--- a/src/graph/nodes/PrintLayerNode.cpp
+++ b/src/graph/nodes/PrintLayerNode.cpp
@@ -32,7 +32,9 @@ namespace arm_compute
{
namespace graph
{
-PrintLayerNode::PrintLayerNode(std::ostream &stream, const IOFormatInfo &format_info, const std::function<ITensor *(ITensor *)> transform)
+PrintLayerNode::PrintLayerNode(std::ostream &stream,
+ const IOFormatInfo &format_info,
+ const std::function<ITensor *(ITensor *)> transform)
: _stream(stream), _format_info(format_info), _transform(transform)
{
_input_edges.resize(1, EmptyEdgeID);
@@ -56,7 +58,7 @@ const std::function<ITensor *(ITensor *)> PrintLayerNode::transform() const
bool PrintLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -88,4 +90,4 @@ void PrintLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/PriorBoxLayerNode.cpp b/src/graph/nodes/PriorBoxLayerNode.cpp
index f017ead880..5ffb173333 100644
--- a/src/graph/nodes/PriorBoxLayerNode.cpp
+++ b/src/graph/nodes/PriorBoxLayerNode.cpp
@@ -32,8 +32,7 @@ namespace arm_compute
{
namespace graph
{
-PriorBoxLayerNode::PriorBoxLayerNode(PriorBoxLayerInfo prior_info)
- : _info(std::move(prior_info))
+PriorBoxLayerNode::PriorBoxLayerNode(PriorBoxLayerInfo prior_info) : _info(std::move(prior_info))
{
_input_edges.resize(2, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -44,7 +43,7 @@ PriorBoxLayerInfo PriorBoxLayerNode::priorbox_info() const
return _info;
}
-TensorDescriptor PriorBoxLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+TensorDescriptor PriorBoxLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
const PriorBoxLayerInfo &info)
{
const unsigned int layer_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
@@ -61,7 +60,7 @@ TensorDescriptor PriorBoxLayerNode::compute_output_descriptor(const TensorDescri
bool PriorBoxLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/QuantizationLayerNode.cpp b/src/graph/nodes/QuantizationLayerNode.cpp
index 4906808dae..0dd2da919d 100644
--- a/src/graph/nodes/QuantizationLayerNode.cpp
+++ b/src/graph/nodes/QuantizationLayerNode.cpp
@@ -47,7 +47,7 @@ QuantizationLayerNode::QuantizationLayerNode(QuantizationInfo out_quant_info, Da
bool QuantizationLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/ROIAlignLayerNode.cpp b/src/graph/nodes/ROIAlignLayerNode.cpp
index 62891811f3..5909335826 100644
--- a/src/graph/nodes/ROIAlignLayerNode.cpp
+++ b/src/graph/nodes/ROIAlignLayerNode.cpp
@@ -24,17 +24,15 @@
#include "arm_compute/graph/nodes/ROIAlignLayerNode.h"
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
-#include "arm_compute/core/Helpers.h"
-
namespace arm_compute
{
namespace graph
{
-ROIAlignLayerNode::ROIAlignLayerNode(ROIPoolingLayerInfo &pool_info)
- : _pool_info(pool_info)
+ROIAlignLayerNode::ROIAlignLayerNode(ROIPoolingLayerInfo &pool_info) : _pool_info(pool_info)
{
_input_edges.resize(2, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -47,7 +45,7 @@ const ROIPoolingLayerInfo &ROIAlignLayerNode::pooling_info() const
bool ROIAlignLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -92,4 +90,4 @@ void ROIAlignLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/ReductionLayerNode.cpp b/src/graph/nodes/ReductionLayerNode.cpp
index 0e93039894..965c1ba0a5 100644
--- a/src/graph/nodes/ReductionLayerNode.cpp
+++ b/src/graph/nodes/ReductionLayerNode.cpp
@@ -56,7 +56,7 @@ bool ReductionLayerNode::keep_dims() const
bool ReductionLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -74,8 +74,9 @@ TensorDescriptor ReductionLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- TensorDescriptor output_info = src->desc();
- TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(output_info.shape, _axis, _keep_dims);
+ TensorDescriptor output_info = src->desc();
+ TensorShape output_shape =
+ arm_compute::misc::shape_calculator::compute_reduced_shape(output_info.shape, _axis, _keep_dims);
output_info.set_shape(output_shape);
return output_info;
@@ -91,4 +92,4 @@ void ReductionLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/ReorgLayerNode.cpp b/src/graph/nodes/ReorgLayerNode.cpp
index e693e4b931..251a4ea1b2 100644
--- a/src/graph/nodes/ReorgLayerNode.cpp
+++ b/src/graph/nodes/ReorgLayerNode.cpp
@@ -31,8 +31,7 @@ namespace arm_compute
{
namespace graph
{
-ReorgLayerNode::ReorgLayerNode(int stride)
- : _stride(stride)
+ReorgLayerNode::ReorgLayerNode(int stride) : _stride(stride)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -51,20 +50,22 @@ TensorDescriptor ReorgLayerNode::compute_output_descriptor(const TensorDescripto
ARM_COMPUTE_ERROR_ON(stride <= 0);
ARM_COMPUTE_ERROR_ON_MSG((input_width % stride != 0), "The width of the input tensor must be a multiple of stride");
- ARM_COMPUTE_ERROR_ON_MSG((input_height % stride != 0), "The height of the input tensor must be a multiple of stride");
+ ARM_COMPUTE_ERROR_ON_MSG((input_height % stride != 0),
+ "The height of the input tensor must be a multiple of stride");
const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), input_width / stride);
output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), input_height / stride);
- output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), input_channel * stride * stride);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL),
+ input_channel * stride * stride);
return output_descriptor;
}
bool ReorgLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -95,4 +96,4 @@ void ReorgLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/ReshapeLayer.cpp b/src/graph/nodes/ReshapeLayer.cpp
index a6354d03ed..ce6bf9b803 100644
--- a/src/graph/nodes/ReshapeLayer.cpp
+++ b/src/graph/nodes/ReshapeLayer.cpp
@@ -21,17 +21,15 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
-
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
namespace arm_compute
{
namespace graph
{
-ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
- : _shape(shape)
+ReshapeLayerNode::ReshapeLayerNode(TensorShape shape) : _shape(shape)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -39,7 +37,7 @@ ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
bool ReshapeLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -73,4 +71,4 @@ void ReshapeLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/ResizeLayerNode.cpp b/src/graph/nodes/ResizeLayerNode.cpp
index 2a94bf6063..292b2c643e 100644
--- a/src/graph/nodes/ResizeLayerNode.cpp
+++ b/src/graph/nodes/ResizeLayerNode.cpp
@@ -50,7 +50,7 @@ std::pair<float, float> ResizeLayerNode::scaling_factor() const
bool ResizeLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -88,4 +88,4 @@ void ResizeLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/SliceLayerNode.cpp b/src/graph/nodes/SliceLayerNode.cpp
index b7655b9eae..eb877d9a24 100644
--- a/src/graph/nodes/SliceLayerNode.cpp
+++ b/src/graph/nodes/SliceLayerNode.cpp
@@ -32,8 +32,7 @@ namespace arm_compute
{
namespace graph
{
-SliceLayerNode::SliceLayerNode(const Coordinates &starts, const Coordinates &ends)
- : _starts(starts), _ends(ends)
+SliceLayerNode::SliceLayerNode(const Coordinates &starts, const Coordinates &ends) : _starts(starts), _ends(ends)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -50,19 +49,20 @@ Coordinates SliceLayerNode::ends() const
}
TensorDescriptor SliceLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
- const Coordinates &starts, const Coordinates &ends)
+ const Coordinates &starts,
+ const Coordinates &ends)
{
using namespace arm_compute::helpers::tensor_transform;
TensorDescriptor output_desc = input_descriptor;
- output_desc.shape = arm_compute::misc::shape_calculator::compute_slice_shape(input_descriptor.shape, starts, ends);
+ output_desc.shape = arm_compute::misc::shape_calculator::compute_slice_shape(input_descriptor.shape, starts, ends);
return output_desc;
}
bool SliceLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
diff --git a/src/graph/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
index 031166993a..4beac81b1f 100644
--- a/src/graph/nodes/SoftmaxLayerNode.cpp
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -31,8 +31,7 @@ namespace arm_compute
{
namespace graph
{
-SoftmaxLayerNode::SoftmaxLayerNode(float beta)
- : _beta(beta)
+SoftmaxLayerNode::SoftmaxLayerNode(float beta) : _beta(beta)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -45,7 +44,7 @@ float SoftmaxLayerNode::beta() const
bool SoftmaxLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -79,4 +78,4 @@ void SoftmaxLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/SplitLayerNode.cpp b/src/graph/nodes/SplitLayerNode.cpp
index 31931c3a79..dfb6624f80 100644
--- a/src/graph/nodes/SplitLayerNode.cpp
+++ b/src/graph/nodes/SplitLayerNode.cpp
@@ -49,8 +49,8 @@ unsigned int SplitLayerNode::axis() const
return _axis;
}
-std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
- unsigned int num_splits, int axis, unsigned int idx)
+std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descriptor(
+ const TensorDescriptor &input_descriptor, unsigned int num_splits, int axis, unsigned int idx)
{
// Handle negative axis, negative index is used to specify axis from the end (e.g. -1 for the last axis).
int num_dimension = static_cast<int32_t>(input_descriptor.shape.num_dimensions());
@@ -58,7 +58,7 @@ std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descript
Coordinates coords;
TensorDescriptor output_descriptor = input_descriptor;
int split_size = input_descriptor.shape[tmp_axis] / num_splits;
- if(_size_splits.empty())
+ if (_size_splits.empty())
{
output_descriptor.shape.set(tmp_axis, split_size);
coords.set(tmp_axis, idx * split_size);
@@ -66,15 +66,15 @@ std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descript
else
{
int split_size = _size_splits[idx];
- if(split_size == -1)
+ if (split_size == -1)
{
split_size = input_descriptor.shape[tmp_axis];
- for(unsigned int i = 0; i < _size_splits.size() - 1; ++i)
+ for (unsigned int i = 0; i < _size_splits.size() - 1; ++i)
split_size -= _size_splits[i];
}
output_descriptor.shape.set(tmp_axis, split_size);
int coord_value = 0;
- for(unsigned int i = 0; i < idx; ++i)
+ for (unsigned int i = 0; i < idx; ++i)
coord_value += _size_splits[i];
coords.set(tmp_axis, coord_value);
}
@@ -84,12 +84,12 @@ std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descript
bool SplitLayerNode::forward_descriptors()
{
- if(input_id(0) != NullTensorID)
+ if (input_id(0) != NullTensorID)
{
validate();
- for(unsigned int i = 0; i < _outputs.size(); ++i)
+ for (unsigned int i = 0; i < _outputs.size(); ++i)
{
- if(output_id(i) != NullTensorID)
+ if (output_id(i) != NullTensorID)
{
Tensor *dst_i = output(i);
ARM_COMPUTE_ERROR_ON(dst_i == nullptr);
@@ -117,10 +117,10 @@ TensorDescriptor SplitLayerNode::configure_output(size_t idx) const
int tmp_axis = wrap_around(_axis, num_dimension);
int split_size = (_size_splits.empty()) ? (input_descriptor.shape[tmp_axis] / _num_splits) : _size_splits[idx];
- if(split_size == -1)
+ if (split_size == -1)
{
split_size = input_descriptor.shape[tmp_axis];
- for(unsigned int i = 0; i < _size_splits.size() - 1; ++i)
+ for (unsigned int i = 0; i < _size_splits.size() - 1; ++i)
split_size -= _size_splits[i];
}
output_descriptor.shape.set(tmp_axis, split_size);
@@ -138,7 +138,7 @@ Status SplitLayerNode::validate() const
// Handle negative axis, negative index is used to specify axis from the end (e.g. -1 for the last axis).
int tmp_axis = wrap_around(_axis, num_dimension);
- if(_size_splits.empty())
+ if (_size_splits.empty())
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->desc().shape[tmp_axis] % _num_splits, "Split should be exact");
}
@@ -156,4 +156,4 @@ void SplitLayerNode::accept(INodeVisitor &v)
v.visit(*this);
}
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/nodes/StackLayerNode.cpp b/src/graph/nodes/StackLayerNode.cpp
index f292b33ad0..031d8fc739 100644
--- a/src/graph/nodes/StackLayerNode.cpp
+++ b/src/graph/nodes/StackLayerNode.cpp
@@ -25,18 +25,16 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"
#include "arm_compute/graph/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-
namespace arm_compute
{
namespace graph
{
-StackLayerNode::StackLayerNode(unsigned int total_nodes, int axis)
- : _total_nodes(total_nodes), _axis(axis)
+StackLayerNode::StackLayerNode(unsigned int total_nodes, int axis) : _total_nodes(total_nodes), _axis(axis)
{
_input_edges.resize(_total_nodes, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -64,7 +62,7 @@ TensorDescriptor StackLayerNode::compute_output_descriptor(const std::vector<Ten
bool StackLayerNode::forward_descriptors()
{
- if(_outputs[0] != NullTensorID)
+ if (_outputs[0] != NullTensorID)
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);
@@ -80,17 +78,15 @@ TensorDescriptor StackLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
// Check if all input tensors are set
- bool are_all_inputs_set = std::all_of(std::begin(_input_edges), std::end(_input_edges), [](const EdgeID & eid)
- {
- return eid != EmptyEdgeID;
- });
+ bool are_all_inputs_set = std::all_of(std::begin(_input_edges), std::end(_input_edges),
+ [](const EdgeID &eid) { return eid != EmptyEdgeID; });
TensorDescriptor output_info = {};
- if(are_all_inputs_set)
+ if (are_all_inputs_set)
{
std::vector<TensorDescriptor> inputs_descriptors;
- for(unsigned int i = 0; i < _input_edges.size(); ++i)
+ for (unsigned int i = 0; i < _input_edges.size(); ++i)
{
const Tensor *t = _graph->tensor(input_id(i));
ARM_COMPUTE_ERROR_ON(t == nullptr);
diff --git a/src/graph/nodes/StridedSliceLayerNode.cpp b/src/graph/nodes/StridedSliceLayerNode.cpp
index 6a1a724bb3..fc9f72204c 100644
--- a/src/graph/nodes/StridedSliceLayerNode.cpp
+++ b/src/graph/nodes/StridedSliceLayerNode.cpp
@@ -79,7 +79,7 @@ TensorDescriptor StridedSliceLayerNode::compute_output_descriptor(const TensorDe
bool StridedSliceLayerNode::forward_descriptors()
{
- if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ if ((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
{
Tensor *dst = output(0);
ARM_COMPUTE_ERROR_ON(dst == nullptr);