author     Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-27 17:46:17 +0100
committer  felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-28 12:08:05 +0000
commit     afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree       03bc7d5a762099989b16a656fa8d397b490ed70e /arm_compute/graph
parent     bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion list:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
and the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of the .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
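The revised .clang-format file itself is not part of this delivery, so the following is only a reconstruction: a minimal sketch of clang-format 14 options whose values are inferred from the formatting visible in the hunks below (120-column limit, Allman braces, one parameter per line on wrapped signatures, right-bound pointers, aligned declarations and trailing doc comments, case-insensitively sorted includes). The configuration actually used may differ.

    # Hypothetical .clang-format sketch inferred from this diff;
    # NOT the configuration file that was actually applied.
    Language:                     Cpp
    ColumnLimit:                  120
    IndentWidth:                  4
    BreakBeforeBraces:            Allman
    PointerAlignment:             Right
    BinPackParameters:            false              # one parameter per line once a signature wraps
    BinPackArguments:             false
    AlignConsecutiveAssignments:  true
    AlignConsecutiveDeclarations: true               # aligns member declarations in Graph, GraphConfig, ...
    AlignTrailingComments:        true               # lines up the /**< ... */ doc comments
    SpaceBeforeParens:            ControlStatements  # if (...), for (...), switch (...), while (...)
    BreakConstructorInitializers: BeforeColon        # ": _id(id)," initializer-list style
    Cpp11BracedListStyle:         true               # {nullptr} instead of { nullptr }
    FixNamespaceComments:         true               # trailing "// namespace graph" comments
    SortIncludes:                 CaseInsensitive    # matches the reordered #include blocks

Under these assumptions the tree (minus the exclusion list) would be reformatted in place with clang-format 14.0.6, e.g. clang-format -i <file>; clang-format also emits a final newline, which would account for the "\ No newline at end of file" fixes in LayerDescriptors.h and BackendRegistrar.h.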
Diffstat (limited to 'arm_compute/graph')
-rw-r--r--  arm_compute/graph/Edge.h | 14
-rw-r--r--  arm_compute/graph/Graph.h | 18
-rw-r--r--  arm_compute/graph/GraphBuilder.h | 196
-rw-r--r--  arm_compute/graph/GraphContext.h | 17
-rw-r--r--  arm_compute/graph/IDeviceBackend.h | 3
-rw-r--r--  arm_compute/graph/LayerDescriptors.h | 35
-rw-r--r--  arm_compute/graph/Logger.h | 16
-rw-r--r--  arm_compute/graph/Tensor.h | 3
-rw-r--r--  arm_compute/graph/TensorDescriptor.h | 16
-rw-r--r--  arm_compute/graph/TypePrinter.h | 12
-rw-r--r--  arm_compute/graph/Types.h | 23
-rw-r--r--  arm_compute/graph/Utils.h | 2
-rw-r--r--  arm_compute/graph/Workload.h | 13
-rw-r--r--  arm_compute/graph/backends/BackendRegistrar.h | 4
-rw-r--r--  arm_compute/graph/backends/CL/CLDeviceBackend.h | 20
-rw-r--r--  arm_compute/graph/backends/CL/CLSubTensorHandle.h | 14
-rw-r--r--  arm_compute/graph/backends/CL/CLTensorHandle.h | 9
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h | 655
-rw-r--r--  arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h | 31
-rw-r--r--  arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h | 36
-rw-r--r--  arm_compute/graph/backends/NEON/NEDeviceBackend.h | 16
-rw-r--r--  arm_compute/graph/backends/NEON/NESubTensorHandle.h | 14
-rw-r--r--  arm_compute/graph/backends/NEON/NETensorHandle.h | 9
-rw-r--r--  arm_compute/graph/backends/Utils.h | 8
-rw-r--r--  arm_compute/graph/backends/ValidateHelpers.h | 132
-rw-r--r--  arm_compute/graph/frontend/IStream.h | 4
-rw-r--r--  arm_compute/graph/frontend/Layers.h | 416
-rw-r--r--  arm_compute/graph/frontend/Stream.h | 3
-rw-r--r--  arm_compute/graph/frontend/SubStream.h | 2
-rw-r--r--  arm_compute/graph/frontend/Types.h | 29
-rw-r--r--  arm_compute/graph/mutators/DepthConcatSubTensorMutator.h | 2
-rw-r--r--  arm_compute/graph/mutators/GroupedConvolutionMutator.h | 2
-rw-r--r--  arm_compute/graph/mutators/InPlaceOperationMutator.h | 2
-rw-r--r--  arm_compute/graph/mutators/NodeExecutionMethodMutator.h | 2
-rw-r--r--  arm_compute/graph/mutators/NodeFusionMutator.h | 2
-rw-r--r--  arm_compute/graph/mutators/SplitLayerSubTensorMutator.h | 2
-rw-r--r--  arm_compute/graph/mutators/SyntheticDataTypeMutator.h | 2
-rw-r--r--  arm_compute/graph/nodes/ActivationLayerNode.h | 5
-rw-r--r--  arm_compute/graph/nodes/ArgMinMaxLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/BatchNormalizationLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ChannelShuffleLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ConcatenateLayerNode.h | 5
-rw-r--r--  arm_compute/graph/nodes/ConstNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ConvolutionLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/DeconvolutionLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/DepthToSpaceLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/DequantizationLayerNode.h | 4
-rw-r--r--  arm_compute/graph/nodes/DetectionOutputLayerNode.h | 5
-rw-r--r--  arm_compute/graph/nodes/DetectionPostProcessLayerNode.h | 4
-rw-r--r--  arm_compute/graph/nodes/DummyNode.h | 4
-rw-r--r--  arm_compute/graph/nodes/EltwiseLayerNode.h | 4
-rw-r--r--  arm_compute/graph/nodes/FlattenLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/FullyConnectedLayerNode.h | 4
-rw-r--r--  arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h | 5
-rw-r--r--  arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h | 4
-rw-r--r--  arm_compute/graph/nodes/GenerateProposalsLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/InputNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/L2NormalizeLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/Nodes.h | 4
-rw-r--r--  arm_compute/graph/nodes/NormalizationLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/OutputNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/PReluLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/PadLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/PermuteLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/PoolingLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/PrintLayerNode.h | 6
-rw-r--r--  arm_compute/graph/nodes/PriorBoxLayerNode.h | 5
-rw-r--r--  arm_compute/graph/nodes/QuantizationLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ROIAlignLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ReductionLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ReorgLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ReshapeLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/ResizeLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/SliceLayerNode.h | 5
-rw-r--r--  arm_compute/graph/nodes/SoftmaxLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/SplitLayerNode.h | 6
-rw-r--r--  arm_compute/graph/nodes/StackLayerNode.h | 2
-rw-r--r--  arm_compute/graph/nodes/StridedSliceLayerNode.h | 2
-rw-r--r--  arm_compute/graph/printers/DotGraphPrinter.h | 1
82 files changed, 958 insertions, 955 deletions
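A note for downstream users (not part of this change): a repository-wide reformat like this one adds noise to git blame, and Git 2.23+ can skip named commits during blame. A minimal sketch, assuming a conventional ignore file that this repository is not known to ship:

    # .git-blame-ignore-revs (hypothetical file)
    # Apply clang-format on repository
    afd38f0c617d6f89b2b4532c6c44f116617e2b6f

Blame can then be run with git blame --ignore-revs-file .git-blame-ignore-revs <path>, or the file can be registered once via git config blame.ignoreRevsFile .git-blame-ignore-revs.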
diff --git a/arm_compute/graph/Edge.h b/arm_compute/graph/Edge.h
index 5e81b9c52f..7f5075d885 100644
--- a/arm_compute/graph/Edge.h
+++ b/arm_compute/graph/Edge.h
@@ -48,8 +48,18 @@ public:
* @param[in] consumer_idx Consumer node input index
* @param[in] tensor Tensor associated with the edge
*/
- Edge(EdgeID id, INode *producer, unsigned int producer_idx, INode *consumer, unsigned int consumer_idx, Tensor *tensor)
- : _id(id), _producer(producer), _consumer(consumer), _producer_idx(producer_idx), _consumer_idx(consumer_idx), _tensor(tensor)
+ Edge(EdgeID id,
+ INode *producer,
+ unsigned int producer_idx,
+ INode *consumer,
+ unsigned int consumer_idx,
+ Tensor *tensor)
+ : _id(id),
+ _producer(producer),
+ _consumer(consumer),
+ _producer_idx(producer_idx),
+ _consumer_idx(consumer_idx),
+ _tensor(tensor)
{
}
diff --git a/arm_compute/graph/Graph.h b/arm_compute/graph/Graph.h
index 806d84c3fd..e6e173f5fa 100644
--- a/arm_compute/graph/Graph.h
+++ b/arm_compute/graph/Graph.h
@@ -79,7 +79,7 @@ public:
* @return ID of the node
*/
template <typename NT, typename... Ts>
- NodeID add_node(Ts &&... args);
+ NodeID add_node(Ts &&...args);
/** Remove the node with the given ID
*
* @param[in] nid ID of the node to remove
@@ -221,17 +221,17 @@ private:
TensorID create_tensor(const TensorDescriptor &desc = TensorDescriptor());
private:
- GraphID _id = GraphID(0); /**< Graph id */
- std::string _name = {}; /**< Graph name */
- std::vector<std::unique_ptr<INode>> _nodes = {}; /**< Graph nodes */
- std::vector<std::unique_ptr<Edge>> _edges = {}; /**< Graph edges */
- std::vector<std::unique_ptr<Tensor>> _tensors = {}; /**< Graph tensors */
+ GraphID _id = GraphID(0); /**< Graph id */
+ std::string _name = {}; /**< Graph name */
+ std::vector<std::unique_ptr<INode>> _nodes = {}; /**< Graph nodes */
+ std::vector<std::unique_ptr<Edge>> _edges = {}; /**< Graph edges */
+ std::vector<std::unique_ptr<Tensor>> _tensors = {}; /**< Graph tensors */
std::map<NodeType, std::vector<NodeID>> _tagged_nodes = {}; /**< Graph nodes map with the node type as key */
- arm_compute::Mutex _mtx = {}; /**< Mutex used for graph construction */
+ arm_compute::Mutex _mtx = {}; /**< Mutex used for graph construction */
};
template <typename NT, typename... Ts>
-inline NodeID Graph::add_node(Ts &&... args)
+inline NodeID Graph::add_node(Ts &&...args)
{
arm_compute::lock_guard<arm_compute::Mutex> lock(_mtx);
@@ -245,7 +245,7 @@ inline NodeID Graph::add_node(Ts &&... args)
_tagged_nodes[node->type()].push_back(nid);
// Associate a new tensor with each output
- for(auto &output : node->_outputs)
+ for (auto &output : node->_outputs)
{
output = create_tensor();
}
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index cb88c0e7aa..118d06bdda 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -51,7 +51,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_const_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
+ static NodeID
+ add_const_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
/** Adds an input layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -61,7 +62,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_input_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
+ static NodeID
+ add_input_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
/** Adds an output layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -71,7 +73,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_output_node(Graph &g, NodeParams params, NodeIdxPair input, ITensorAccessorUPtr accessor = nullptr);
+ static NodeID
+ add_output_node(Graph &g, NodeParams params, NodeIdxPair input, ITensorAccessorUPtr accessor = nullptr);
/** Adds an activation layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -82,7 +85,10 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_activation_node(Graph &g, NodeParams params, NodeIdxPair input, ActivationLayerInfo act_info,
+ static NodeID add_activation_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ ActivationLayerInfo act_info,
const QuantizationInfo &out_quant_info = QuantizationInfo());
/** Adds an activation layer node to the graph
*
@@ -96,7 +102,11 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_arg_min_max_node(Graph &g, NodeParams params, NodeIdxPair input, ReductionOperation op, unsigned int axis,
+ static NodeID add_arg_min_max_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ ReductionOperation op,
+ unsigned int axis,
DataType out_data_type = DataType::UNKNOWN,
const QuantizationInfo &out_quant_info = QuantizationInfo());
/** Adds a batch normalization layer node to the graph
@@ -112,9 +122,14 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_batch_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, float epsilon,
- ITensorAccessorUPtr mean_accessor = nullptr, ITensorAccessorUPtr var_accessor = nullptr,
- ITensorAccessorUPtr beta_accessor = nullptr, ITensorAccessorUPtr gamma_accessor = nullptr);
+ static NodeID add_batch_normalization_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ float epsilon,
+ ITensorAccessorUPtr mean_accessor = nullptr,
+ ITensorAccessorUPtr var_accessor = nullptr,
+ ITensorAccessorUPtr beta_accessor = nullptr,
+ ITensorAccessorUPtr gamma_accessor = nullptr);
/** Adds a bounding box transform layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -125,7 +140,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_bounding_box_transform_node(Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair deltas, BoundingBoxTransformInfo info);
+ static NodeID add_bounding_box_transform_node(
+ Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair deltas, BoundingBoxTransformInfo info);
/** Adds an channel shuffle layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -154,10 +170,17 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
- Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info, unsigned int num_groups = 1,
- ConvolutionMethod method = ConvolutionMethod::Default, FastMathHint fast_math_hint = FastMathHint::Disabled,
- ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr,
+ static NodeID add_convolution_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ Size2D kernel_spatial_extend,
+ unsigned int depth,
+ PadStrideInfo conv_info,
+ unsigned int num_groups = 1,
+ ConvolutionMethod method = ConvolutionMethod::Default,
+ FastMathHint fast_math_hint = FastMathHint::Disabled,
+ ITensorAccessorUPtr weights_accessor = nullptr,
+ ITensorAccessorUPtr bias_accessor = nullptr,
const QuantizationInfo &weights_quant_info = QuantizationInfo(),
const QuantizationInfo &out_quant_info = QuantizationInfo());
/** Adds a deconvolution layer node to the graph
@@ -173,9 +196,14 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_deconvolution_node(Graph &g, NodeParams params, NodeIdxPair input,
- Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo deconv_info,
- ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr);
+ static NodeID add_deconvolution_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ Size2D kernel_spatial_extend,
+ unsigned int depth,
+ PadStrideInfo deconv_info,
+ ITensorAccessorUPtr weights_accessor = nullptr,
+ ITensorAccessorUPtr bias_accessor = nullptr);
/** Adds a depth concatenate node to the graph
*
* @param[in] g Graph to add the node to
@@ -185,7 +213,10 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_concatenate_node(Graph &g, NodeParams params, const std::vector<NodeIdxPair> &inputs, const descriptors::ConcatLayerDescriptor &concat_descriptor);
+ static NodeID add_concatenate_node(Graph &g,
+ NodeParams params,
+ const std::vector<NodeIdxPair> &inputs,
+ const descriptors::ConcatLayerDescriptor &concat_descriptor);
/** Adds an depth to space layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -212,11 +243,18 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
- Size2D kernel_spatial_extend, PadStrideInfo conv_info, int depth_multiplier = 1,
- DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::Default,
- ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr, const QuantizationInfo &quant_info = QuantizationInfo(),
- const QuantizationInfo &out_quant_info = QuantizationInfo());
+ static NodeID
+ add_depthwise_convolution_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ Size2D kernel_spatial_extend,
+ PadStrideInfo conv_info,
+ int depth_multiplier = 1,
+ DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::Default,
+ ITensorAccessorUPtr weights_accessor = nullptr,
+ ITensorAccessorUPtr bias_accessor = nullptr,
+ const QuantizationInfo &quant_info = QuantizationInfo(),
+ const QuantizationInfo &out_quant_info = QuantizationInfo());
/** Adds an element-wise layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -227,7 +265,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation);
+ static NodeID add_elementwise_node(
+ Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation);
/** Adds a dequantization node to the graph
*
* @param[in] g Graph to add the node to
@@ -248,7 +287,12 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_detection_output_node(Graph &g, NodeParams params, NodeIdxPair input_loc, NodeIdxPair input_conf, NodeIdxPair input_priorbox, const DetectionOutputLayerInfo &detect_info);
+ static NodeID add_detection_output_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input_loc,
+ NodeIdxPair input_conf,
+ NodeIdxPair input_priorbox,
+ const DetectionOutputLayerInfo &detect_info);
/** Adds a detection post process layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -261,8 +305,12 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_detection_post_process_node(Graph &g, NodeParams params, NodeIdxPair input_box_encoding, NodeIdxPair input_class_prediction,
- const DetectionPostProcessLayerInfo &detect_info, ITensorAccessorUPtr anchors_accessor = nullptr,
+ static NodeID add_detection_post_process_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input_box_encoding,
+ NodeIdxPair input_class_prediction,
+ const DetectionPostProcessLayerInfo &detect_info,
+ ITensorAccessorUPtr anchors_accessor = nullptr,
const QuantizationInfo &anchor_quant_info = QuantizationInfo());
/** Adds a Dummy node to the graph
*
@@ -299,8 +347,12 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_fully_connected_layer(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_outputs,
- NodeID weights_nid, NodeID bias_nid = EmptyNodeID,
+ static NodeID add_fully_connected_layer(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ unsigned int num_outputs,
+ NodeID weights_nid,
+ NodeID bias_nid = EmptyNodeID,
const FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(),
const QuantizationInfo &out_quant_info = QuantizationInfo(),
FastMathHint fast_math_hint = FastMathHint::Disabled);
@@ -319,9 +371,13 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_fully_connected_layer(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_outputs,
- ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr,
- const FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(),
+ static NodeID add_fully_connected_layer(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ unsigned int num_outputs,
+ ITensorAccessorUPtr weights_accessor = nullptr,
+ ITensorAccessorUPtr bias_accessor = nullptr,
+ const FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(),
const QuantizationInfo &weights_quant_info = QuantizationInfo(),
const QuantizationInfo &out_quant_info = QuantizationInfo(),
FastMathHint fast_math_hint = FastMathHint::Disabled);
@@ -336,8 +392,12 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_generate_proposals_node(Graph &g, NodeParams params, NodeIdxPair scores, NodeIdxPair deltas,
- NodeIdxPair anchors, GenerateProposalsInfo info);
+ static NodeID add_generate_proposals_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair scores,
+ NodeIdxPair deltas,
+ NodeIdxPair anchors,
+ GenerateProposalsInfo info);
/** Adds a L2 Normalize layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -358,7 +418,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, NormalizationLayerInfo norm_info);
+ static NodeID
+ add_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, NormalizationLayerInfo norm_info);
/** Adds a normalize planar YUV layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -369,8 +430,11 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_normalize_planar_yuv_node(Graph &g, NodeParams params, NodeIdxPair input,
- ITensorAccessorUPtr mean_accessor = nullptr, ITensorAccessorUPtr std_accessor = nullptr);
+ static NodeID add_normalize_planar_yuv_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ ITensorAccessorUPtr mean_accessor = nullptr,
+ ITensorAccessorUPtr std_accessor = nullptr);
/** Adds a pad layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -382,7 +446,11 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_pad_node(Graph &g, NodeParams params, NodeIdxPair input, const PaddingList &paddings, PixelValue pad_value = PixelValue());
+ static NodeID add_pad_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ const PaddingList &paddings,
+ PixelValue pad_value = PixelValue());
/** Adds a permute layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -394,7 +462,11 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_permute_node(Graph &g, NodeParams params, NodeIdxPair input, PermutationVector perm, DataLayout layout = DataLayout::UNKNOWN);
+ static NodeID add_permute_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ PermutationVector perm,
+ DataLayout layout = DataLayout::UNKNOWN);
/** Adds a pooling layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -426,8 +498,12 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_print_node(Graph &g, NodeParams params, NodeIdxPair input, std::ostream &stream, const IOFormatInfo &format_info = IOFormatInfo(),
- const std::function<ITensor *(ITensor *)> transform = nullptr);
+ static NodeID add_print_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ std::ostream &stream,
+ const IOFormatInfo &format_info = IOFormatInfo(),
+ const std::function<ITensor *(ITensor *)> transform = nullptr);
/** Adds a priorbox layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -438,7 +514,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_priorbox_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, const PriorBoxLayerInfo &prior_info);
+ static NodeID add_priorbox_node(
+ Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, const PriorBoxLayerInfo &prior_info);
/** Adds a quantization layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -448,7 +525,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_quantization_node(Graph &g, NodeParams params, NodeIdxPair input, const QuantizationInfo &out_quant_info);
+ static NodeID
+ add_quantization_node(Graph &g, NodeParams params, NodeIdxPair input, const QuantizationInfo &out_quant_info);
/** Adds a reduction sum layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -460,7 +538,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_reduction_operation_node(Graph &g, NodeParams params, NodeIdxPair input, ReductionOperation op, int axis, bool keep_dims = true);
+ static NodeID add_reduction_operation_node(
+ Graph &g, NodeParams params, NodeIdxPair input, ReductionOperation op, int axis, bool keep_dims = true);
/** Adds a reorg layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -492,7 +571,12 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_resize_node(Graph &g, NodeParams params, NodeIdxPair input, InterpolationPolicy policy, float width_scale, float height_scale);
+ static NodeID add_resize_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ InterpolationPolicy policy,
+ float width_scale,
+ float height_scale);
/** Adds a ROI align layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -503,7 +587,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_roi_align_node(Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair rois, ROIPoolingLayerInfo pool_info);
+ static NodeID
+ add_roi_align_node(Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair rois, ROIPoolingLayerInfo pool_info);
/** Adds a scale layer node to the graph
* This layer computes a product of the input with a scale (read from mul_accessor) and it applies an offset (read from add_accessor).
* output = input * mul_w + add_w
@@ -516,8 +601,11 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_scale_layer(Graph &g, const NodeParams &params, NodeIdxPair input,
- ITensorAccessorUPtr mul_accessor = nullptr, ITensorAccessorUPtr add_accessor = nullptr);
+ static NodeID add_scale_layer(Graph &g,
+ const NodeParams &params,
+ NodeIdxPair input,
+ ITensorAccessorUPtr mul_accessor = nullptr,
+ ITensorAccessorUPtr add_accessor = nullptr);
/** Adds a softmax node to the graph
*
* @param[in] g Graph to add the node to
@@ -538,7 +626,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_slice_node(Graph &g, NodeParams params, NodeIdxPair input, Coordinates &starts, Coordinates &ends);
+ static NodeID
+ add_slice_node(Graph &g, NodeParams params, NodeIdxPair input, Coordinates &starts, Coordinates &ends);
/** Adds a split node to the graph
*
* @param[in] g Graph to add the node to
@@ -549,7 +638,8 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_split_node(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_splits, unsigned int axis = 0);
+ static NodeID
+ add_split_node(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_splits, unsigned int axis = 0);
/** Adds a stack layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -572,7 +662,13 @@ public:
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_strided_slice_node(Graph &g, NodeParams params, NodeIdxPair input, Coordinates &starts, Coordinates &ends, BiStrides &strides, StridedSliceLayerInfo info);
+ static NodeID add_strided_slice_node(Graph &g,
+ NodeParams params,
+ NodeIdxPair input,
+ Coordinates &starts,
+ Coordinates &ends,
+ BiStrides &strides,
+ StridedSliceLayerInfo info);
/** Adds a yolo layer to the graph
*
* @param[in] g Graph to add the node to
diff --git a/arm_compute/graph/GraphContext.h b/arm_compute/graph/GraphContext.h
index 7beb598646..68fbaf5478 100644
--- a/arm_compute/graph/GraphContext.h
+++ b/arm_compute/graph/GraphContext.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_GRAPH_GRAPH_CONTEXT_H
#include "arm_compute/graph/Types.h"
-
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/IWeightsManager.h"
@@ -39,18 +38,18 @@ namespace graph
/** Contains structs required for memory management */
struct MemoryManagerContext
{
- Target target = { Target::UNSPECIFIED }; /**< Target */
- std::shared_ptr<arm_compute::IMemoryManager> intra_mm = { nullptr }; /**< Intra-function memory manager */
- std::shared_ptr<arm_compute::IMemoryManager> cross_mm = { nullptr }; /**< Cross-function memory manager */
- std::shared_ptr<arm_compute::IMemoryGroup> cross_group = { nullptr }; /**< Cross-function memory group */
- IAllocator *allocator = { nullptr }; /**< Backend allocator to use */
+ Target target = {Target::UNSPECIFIED}; /**< Target */
+ std::shared_ptr<arm_compute::IMemoryManager> intra_mm = {nullptr}; /**< Intra-function memory manager */
+ std::shared_ptr<arm_compute::IMemoryManager> cross_mm = {nullptr}; /**< Cross-function memory manager */
+ std::shared_ptr<arm_compute::IMemoryGroup> cross_group = {nullptr}; /**< Cross-function memory group */
+ IAllocator *allocator = {nullptr}; /**< Backend allocator to use */
};
/** Contains structs required for weights management */
struct WeightsManagerContext
{
- Target target = { Target::UNSPECIFIED }; /**< Target */
- std::shared_ptr<arm_compute::IWeightsManager> wm = { nullptr }; /**< Weights manager */
+ Target target = {Target::UNSPECIFIED}; /**< Target */
+ std::shared_ptr<arm_compute::IWeightsManager> wm = {nullptr}; /**< Weights manager */
};
/** Graph context **/
@@ -125,7 +124,7 @@ public:
void finalize();
private:
- GraphConfig _config; /**< Graph configuration */
+ GraphConfig _config; /**< Graph configuration */
std::map<Target, MemoryManagerContext> _memory_managers; /**< Memory managers for each target */
std::map<Target, WeightsManagerContext> _weights_managers; /**< Weights managers for each target */
};
diff --git a/arm_compute/graph/IDeviceBackend.h b/arm_compute/graph/IDeviceBackend.h
index f84aac0ae0..8ae92e3177 100644
--- a/arm_compute/graph/IDeviceBackend.h
+++ b/arm_compute/graph/IDeviceBackend.h
@@ -88,7 +88,8 @@ public:
*
* @return Backend sub-tensor handle
*/
- virtual std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) = 0;
+ virtual std::unique_ptr<ITensorHandle>
+ create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) = 0;
/** Configure a backend Node
*
* @note This creates an appropriate configured backend function for the given node
diff --git a/arm_compute/graph/LayerDescriptors.h b/arm_compute/graph/LayerDescriptors.h
index c11174f2ce..d632ed9e78 100644
--- a/arm_compute/graph/LayerDescriptors.h
+++ b/arm_compute/graph/LayerDescriptors.h
@@ -37,8 +37,7 @@ namespace descriptors
struct ConcatLayerDescriptor
{
/** Default constructor */
- ConcatLayerDescriptor()
- : axis(DataLayoutDimension::CHANNEL), output_qinfo()
+ ConcatLayerDescriptor() : axis(DataLayoutDimension::CHANNEL), output_qinfo()
{
}
@@ -46,8 +45,7 @@ struct ConcatLayerDescriptor
*
* @param[in] axis Axis.
*/
- ConcatLayerDescriptor(DataLayoutDimension axis)
- : axis(axis), output_qinfo()
+ ConcatLayerDescriptor(DataLayoutDimension axis) : axis(axis), output_qinfo()
{
}
@@ -76,9 +74,16 @@ struct EltwiseLayerDescriptor
* @param[in] r_policy (Optional) Rounding policy used for the operation. Defaults to @ref RoundingPolicy::TO_ZERO
* @param[in] fused_activation (Optional) Fused activation information. Defaults to empty (identity) @ref ActivationLayerInfo
*/
- EltwiseLayerDescriptor(EltwiseOperation op, QuantizationInfo out_quant_info = QuantizationInfo(), ConvertPolicy c_policy = ConvertPolicy::SATURATE, RoundingPolicy r_policy = RoundingPolicy::TO_ZERO,
+ EltwiseLayerDescriptor(EltwiseOperation op,
+ QuantizationInfo out_quant_info = QuantizationInfo(),
+ ConvertPolicy c_policy = ConvertPolicy::SATURATE,
+ RoundingPolicy r_policy = RoundingPolicy::TO_ZERO,
ActivationLayerInfo fused_activation = ActivationLayerInfo())
- : op(op), out_quant_info(out_quant_info), c_policy(c_policy), r_policy(r_policy), fused_activation(fused_activation)
+ : op(op),
+ out_quant_info(out_quant_info),
+ c_policy(c_policy),
+ r_policy(r_policy),
+ fused_activation(fused_activation)
{
}
@@ -100,10 +105,16 @@ struct UnaryEltwiseLayerDescriptor
* @param[in] r_policy (Optional) Rounding policy used for the operation. Defaults to @ref RoundingPolicy::TO_ZERO
* @param[in] fused_activation (Optional) Fused activation information. Defaults to empty (identity) @ref ActivationLayerInfo
*/
- UnaryEltwiseLayerDescriptor(UnaryEltwiseOperation op, QuantizationInfo out_quant_info = QuantizationInfo(), ConvertPolicy c_policy = ConvertPolicy::SATURATE,
- RoundingPolicy r_policy = RoundingPolicy::TO_ZERO,
- ActivationLayerInfo fused_activation = ActivationLayerInfo())
- : op(op), out_quant_info(out_quant_info), c_policy(c_policy), r_policy(r_policy), fused_activation(fused_activation)
+ UnaryEltwiseLayerDescriptor(UnaryEltwiseOperation op,
+ QuantizationInfo out_quant_info = QuantizationInfo(),
+ ConvertPolicy c_policy = ConvertPolicy::SATURATE,
+ RoundingPolicy r_policy = RoundingPolicy::TO_ZERO,
+ ActivationLayerInfo fused_activation = ActivationLayerInfo())
+ : op(op),
+ out_quant_info(out_quant_info),
+ c_policy(c_policy),
+ r_policy(r_policy),
+ fused_activation(fused_activation)
{
}
@@ -130,7 +141,7 @@ struct DeconvolutionLayerDescriptor
PadStrideInfo info; /**< Padding and stride information */
QuantizationInfo out_quant_info; /**< Output quantization information */
};
-} // namespace descriptor
+} // namespace descriptors
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_LAYER_DESCRIPTORS_H */ \ No newline at end of file
+#endif /* ARM_COMPUTE_LAYER_DESCRIPTORS_H */
diff --git a/arm_compute/graph/Logger.h b/arm_compute/graph/Logger.h
index 872c650a1a..e83d5f4ddc 100644
--- a/arm_compute/graph/Logger.h
+++ b/arm_compute/graph/Logger.h
@@ -31,14 +31,14 @@
*
* @note It will eventually create all default loggers in don't exist
*/
-#define ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER() \
- do \
- { \
- if(arm_compute::logging::LoggerRegistry::get().logger("GRAPH") == nullptr) \
- { \
- arm_compute::logging::LoggerRegistry::get().create_reserved_loggers(); \
- } \
- } while(false)
+#define ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER() \
+ do \
+ { \
+ if (arm_compute::logging::LoggerRegistry::get().logger("GRAPH") == nullptr) \
+ { \
+ arm_compute::logging::LoggerRegistry::get().create_reserved_loggers(); \
+ } \
+ } while (false)
#else /* ARM_COMPUTE_LOGGING_ENABLED */
#define ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER()
#endif /* ARM_COMPUTE_LOGGING_ENABLED */
diff --git a/arm_compute/graph/Tensor.h b/arm_compute/graph/Tensor.h
index de96c998bd..0ffae28ecc 100644
--- a/arm_compute/graph/Tensor.h
+++ b/arm_compute/graph/Tensor.h
@@ -24,11 +24,10 @@
#ifndef ARM_COMPUTE_GRAPH_TENSOR_H
#define ARM_COMPUTE_GRAPH_TENSOR_H
-#include "arm_compute/graph/Types.h"
-
#include "arm_compute/graph/ITensorAccessor.h"
#include "arm_compute/graph/ITensorHandle.h"
#include "arm_compute/graph/TensorDescriptor.h"
+#include "arm_compute/graph/Types.h"
#include <memory>
#include <set>
diff --git a/arm_compute/graph/TensorDescriptor.h b/arm_compute/graph/TensorDescriptor.h
index 5fa155efc8..46a6ab2c27 100644
--- a/arm_compute/graph/TensorDescriptor.h
+++ b/arm_compute/graph/TensorDescriptor.h
@@ -52,7 +52,11 @@ struct TensorDescriptor final : public misc::ICloneable<TensorDescriptor>
QuantizationInfo tensor_quant_info = QuantizationInfo(),
DataLayout tensor_data_layout = DataLayout::NCHW,
Target tensor_target = Target::UNSPECIFIED)
- : shape(tensor_shape), data_type(tensor_data_type), layout(tensor_data_layout), quant_info(tensor_quant_info), target(tensor_target)
+ : shape(tensor_shape),
+ data_type(tensor_data_type),
+ layout(tensor_data_layout),
+ quant_info(tensor_quant_info),
+ target(tensor_target)
{
}
/** Sets tensor descriptor shape
@@ -106,11 +110,11 @@ struct TensorDescriptor final : public misc::ICloneable<TensorDescriptor>
return std::make_unique<TensorDescriptor>(*this);
}
- TensorShape shape{}; /**< Tensor shape */
- DataType data_type{ DataType::UNKNOWN }; /**< Data type */
- DataLayout layout{ DataLayout::NCHW }; /**< Data layout */
- QuantizationInfo quant_info{}; /**< Quantization info */
- Target target{ Target::UNSPECIFIED }; /**< Target */
+ TensorShape shape{}; /**< Tensor shape */
+ DataType data_type{DataType::UNKNOWN}; /**< Data type */
+ DataLayout layout{DataLayout::NCHW}; /**< Data layout */
+ QuantizationInfo quant_info{}; /**< Quantization info */
+ Target target{Target::UNSPECIFIED}; /**< Target */
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index 9df4eba5ec..5e83820ab3 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -37,7 +37,7 @@ namespace graph
/** Formatted output of the Target. */
inline ::std::ostream &operator<<(::std::ostream &os, const Target &target)
{
- switch(target)
+ switch (target)
{
case Target::UNSPECIFIED:
os << "UNSPECIFIED";
@@ -60,7 +60,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const Target &target)
inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
{
- switch(node_type)
+ switch (node_type)
{
case NodeType::ActivationLayer:
os << "ActivationLayer";
@@ -207,7 +207,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
/** Formatted output of the EltwiseOperation type. */
inline ::std::ostream &operator<<(::std::ostream &os, const EltwiseOperation &eltwise_op)
{
- switch(eltwise_op)
+ switch (eltwise_op)
{
case EltwiseOperation::Add:
os << "Add";
@@ -231,7 +231,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const EltwiseOperation &el
/** Formatted output of the ConvolutionMethod type. */
inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &method)
{
- switch(method)
+ switch (method)
{
case ConvolutionMethod::Default:
os << "Default";
@@ -255,7 +255,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &m
/** Formatted output of the FastMathHint type. */
inline ::std::ostream &operator<<(::std::ostream &os, const FastMathHint &hint)
{
- switch(hint)
+ switch (hint)
{
case FastMathHint::Enabled:
os << "Enabled";
@@ -273,7 +273,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const FastMathHint &hint)
/** Formatted output of the DepthwiseConvolutionMethod type. */
inline ::std::ostream &operator<<(::std::ostream &os, const DepthwiseConvolutionMethod &method)
{
- switch(method)
+ switch (method)
{
case DepthwiseConvolutionMethod::Default:
os << "DEFAULT";
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 8d493403b3..5541e3cbcc 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -86,17 +86,18 @@ struct TensorDescriptor;
/** Graph configuration structure */
struct GraphConfig
{
- bool use_function_memory_manager{ true }; /**< Use a memory manager to manage per-function auxilary memory */
- bool use_function_weights_manager{ true }; /**< Use a weights manager to manage transformed weights */
- bool use_transition_memory_manager{ true }; /**< Use a memory manager to manager transition buffer memory */
- bool use_tuner{ false }; /**< Use a tuner in tunable backends */
- bool use_synthetic_type{ false }; /**< Convert graph to a synthetic graph for a data type */
- DataType synthetic_type{ DataType::QASYMM8 }; /**< The data type of the synthetic graph */
- CLTunerMode tuner_mode{ CLTunerMode::EXHAUSTIVE }; /**< Tuner mode to be used by the CL tuner */
- int num_threads{ -1 }; /**< Number of threads to use (thread capable backends), if 0 the backend will auto-initialize, if -1 the backend will stay as it is. */
- std::string tuner_file{ "acl_tuner.csv" }; /**< File to load/store tuning values from */
- std::string mlgo_file{ "heuristics.mlgo" }; /**< Filename to load MLGO heuristics from */
- CLBackendType backend_type{ CLBackendType::Native }; /**< CL backend type to use */
+ bool use_function_memory_manager{true}; /**< Use a memory manager to manage per-function auxilary memory */
+ bool use_function_weights_manager{true}; /**< Use a weights manager to manage transformed weights */
+ bool use_transition_memory_manager{true}; /**< Use a memory manager to manager transition buffer memory */
+ bool use_tuner{false}; /**< Use a tuner in tunable backends */
+ bool use_synthetic_type{false}; /**< Convert graph to a synthetic graph for a data type */
+ DataType synthetic_type{DataType::QASYMM8}; /**< The data type of the synthetic graph */
+ CLTunerMode tuner_mode{CLTunerMode::EXHAUSTIVE}; /**< Tuner mode to be used by the CL tuner */
+ int num_threads{
+ -1}; /**< Number of threads to use (thread capable backends), if 0 the backend will auto-initialize, if -1 the backend will stay as it is. */
+ std::string tuner_file{"acl_tuner.csv"}; /**< File to load/store tuning values from */
+ std::string mlgo_file{"heuristics.mlgo"}; /**< Filename to load MLGO heuristics from */
+ CLBackendType backend_type{CLBackendType::Native}; /**< CL backend type to use */
};
/**< Device target types */
diff --git a/arm_compute/graph/Utils.h b/arm_compute/graph/Utils.h
index a3d9012ee9..9813ff05c7 100644
--- a/arm_compute/graph/Utils.h
+++ b/arm_compute/graph/Utils.h
@@ -36,7 +36,7 @@ class GraphContext;
inline bool is_utility_node(INode *node)
{
- std::set<NodeType> utility_node_types = { NodeType::PrintLayer };
+ std::set<NodeType> utility_node_types = {NodeType::PrintLayer};
return utility_node_types.find(node->type()) != utility_node_types.end();
}
diff --git a/arm_compute/graph/Workload.h b/arm_compute/graph/Workload.h
index 5b4533cb6f..8ff0a548ae 100644
--- a/arm_compute/graph/Workload.h
+++ b/arm_compute/graph/Workload.h
@@ -69,8 +69,7 @@ public:
*/
struct ExecutionTask
{
- ExecutionTask(std::unique_ptr<arm_compute::IFunction> &&f, INode *n)
- : task(std::move(f)), node(n)
+ ExecutionTask(std::unique_ptr<arm_compute::IFunction> &&f, INode *n) : task(std::move(f)), node(n)
{
}
/** Prevent instances of this class from being copied (As this class contains pointers) */
@@ -97,11 +96,11 @@ struct ExecutionTask
/** Execution workload */
struct ExecutionWorkload
{
- std::vector<Tensor *> inputs = {}; /**< Input handles */
- std::vector<Tensor *> outputs = {}; /**< Output handles */
- std::vector<ExecutionTask> tasks = {}; /**< Execution workload */
- Graph *graph = { nullptr }; /**< Graph bound to the workload */
- GraphContext *ctx = { nullptr }; /**< Graph execution context */
+ std::vector<Tensor *> inputs = {}; /**< Input handles */
+ std::vector<Tensor *> outputs = {}; /**< Output handles */
+ std::vector<ExecutionTask> tasks = {}; /**< Execution workload */
+ Graph *graph = {nullptr}; /**< Graph bound to the workload */
+ GraphContext *ctx = {nullptr}; /**< Graph execution context */
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/backends/BackendRegistrar.h b/arm_compute/graph/backends/BackendRegistrar.h
index 902c12b0a6..2879361fef 100644
--- a/arm_compute/graph/backends/BackendRegistrar.h
+++ b/arm_compute/graph/backends/BackendRegistrar.h
@@ -24,8 +24,8 @@
#ifndef ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H
#define ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H
-#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/Types.h"
#include <utility>
@@ -58,4 +58,4 @@ inline BackendRegistrar<T>::BackendRegistrar(Target target)
} // namespace backends
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H */ \ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H */
diff --git a/arm_compute/graph/backends/CL/CLDeviceBackend.h b/arm_compute/graph/backends/CL/CLDeviceBackend.h
index 63674ad794..09e19d7688 100644
--- a/arm_compute/graph/backends/CL/CLDeviceBackend.h
+++ b/arm_compute/graph/backends/CL/CLDeviceBackend.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_GRAPH_CLDEVICEBACKEND_H
#include "arm_compute/graph/IDeviceBackend.h"
-
#include "arm_compute/runtime/CL/CLBufferAllocator.h"
#include "arm_compute/runtime/CL/CLGEMMHeuristicsHandle.h"
#include "arm_compute/runtime/CL/CLTuner.h"
@@ -59,22 +58,23 @@ public:
void set_kernel_tuning_mode(CLTunerMode tuning_mode);
// Inherited overridden methods
- void initialize_backend() override;
- void setup_backend_context(GraphContext &ctx) override;
- void release_backend_context(GraphContext &ctx) override;
+ void initialize_backend() override;
+ void setup_backend_context(GraphContext &ctx) override;
+ void release_backend_context(GraphContext &ctx) override;
bool is_backend_supported() override;
IAllocator *backend_allocator() override;
std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
- std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
- std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
- Status validate_node(INode &node) override;
- std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
+ std::unique_ptr<ITensorHandle>
+ create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
+ std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
+ Status validate_node(INode &node) override;
+ std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
std::shared_ptr<arm_compute::IWeightsManager> create_weights_manager() override;
void sync() override;
private:
- int _context_count; /**< Counts how many contexts are currently using the backend */
- CLTuner _tuner; /**< CL kernel tuner */
+ int _context_count; /**< Counts how many contexts are currently using the backend */
+ CLTuner _tuner; /**< CL kernel tuner */
CLGEMMHeuristicsHandle _gemm_heuristics; /**< GEMM heuristics */
std::unique_ptr<CLBufferAllocator> _allocator; /**< CL buffer affinity allocator */
std::string _tuner_file; /**< Filename to load/store the tuner's values from */
diff --git a/arm_compute/graph/backends/CL/CLSubTensorHandle.h b/arm_compute/graph/backends/CL/CLSubTensorHandle.h
index 3750fc85ee..85eebec639 100644
--- a/arm_compute/graph/backends/CL/CLSubTensorHandle.h
+++ b/arm_compute/graph/backends/CL/CLSubTensorHandle.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_GRAPH_CLSUBTENSORHANDLE_H
#include "arm_compute/graph/ITensorHandle.h"
-
#include "arm_compute/runtime/CL/CLSubTensor.h"
namespace arm_compute
@@ -45,7 +44,10 @@ public:
* @param[in] coords Starting coordinates
* @param[in] extend_parent Extends parent shape if true
*/
- CLSubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords, bool extend_parent = false);
+ CLSubTensorHandle(ITensorHandle *parent_handle,
+ const TensorShape &shape,
+ const Coordinates &coords,
+ bool extend_parent = false);
/** Destructor: free the tensor's memory */
~CLSubTensorHandle() = default;
/** Allow instances of this class to be move constructed */
@@ -58,10 +60,10 @@ public:
CLSubTensorHandle &operator=(const CLSubTensorHandle &) = delete;
// Inherited overridden methods
- void allocate() override;
- void free() override;
- void manage(IMemoryGroup *mg) override;
- void map(bool blocking) override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
void unmap() override;
void release_if_unused() override;
arm_compute::ITensor &tensor() override;
diff --git a/arm_compute/graph/backends/CL/CLTensorHandle.h b/arm_compute/graph/backends/CL/CLTensorHandle.h
index 16e30efc43..57e9794ec3 100644
--- a/arm_compute/graph/backends/CL/CLTensorHandle.h
+++ b/arm_compute/graph/backends/CL/CLTensorHandle.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_GRAPH_CLTENSORHANDLE_H
#include "arm_compute/graph/ITensorHandle.h"
-
#include "arm_compute/runtime/CL/CLTensor.h"
namespace arm_compute
@@ -51,10 +50,10 @@ public:
CLTensorHandle &operator=(CLTensorHandle &&) = default;
// Inherited overridden methods
- void allocate() override;
- void free() override;
- void manage(IMemoryGroup *mg) override;
- void map(bool blocking) override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
void unmap() override;
void release_if_unused() override;
arm_compute::ITensor &tensor() override;
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 877e1f92e4..fd8b6b5a69 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -24,19 +24,19 @@
#ifndef ACL_ARM_COMPUTE_GRAPH_BACKENDS_FUNCTIONHELPERS_H
#define ACL_ARM_COMPUTE_GRAPH_BACKENDS_FUNCTIONHELPERS_H
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
+#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
+#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
-#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
-#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
-#include "arm_compute/graph/backends/Utils.h"
-#include "arm_compute/graph/nodes/Nodes.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensorInfo.h"
#include "support/Cast.h"
namespace arm_compute
@@ -59,13 +59,16 @@ template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
typename TargetInfo::TensorType *backing_tensor = nullptr;
- if(tensor != nullptr)
+ if (tensor != nullptr)
{
ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
// Get backing tensor handle
ITensorHandle *tensor_handle = tensor->handle();
// Get backing tensor
- backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
+ backing_tensor = (tensor_handle != nullptr)
+ ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(
+ &tensor_handle->tensor())
+ : nullptr;
}
return backing_tensor;
@@ -74,11 +77,8 @@ typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " ID: " << node.id()
- << node.name()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type() << " Target: " << TargetInfo::TargetType
+ << " ID: " << node.id() << node.name() << std::endl);
ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
@@ -109,17 +109,11 @@ std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
auto func = std::make_unique<ActivationLayerFunction>();
func->configure(input, output, act_info);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Shape: " << input->info()->tensor_shape()
- << " Activation function: " << act_info.activation()
- << " a: " << act_info.a()
- << " b: " << act_info.b()
- << " InPlace : " << is_in_place_operation(input, output)
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO(
+ "Instantiated " << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type() << " Shape: " << input->info()->tensor_shape()
+ << " Activation function: " << act_info.activation() << " a: " << act_info.a() << " b: "
+ << act_info.b() << " InPlace : " << is_in_place_operation(input, output) << std::endl);
return func;
}
@@ -148,15 +142,10 @@ std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
auto func = std::make_unique<ArgMinMaxLayerFunction>();
func->configure(input, axis, output, op);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Shape: " << input->info()->tensor_shape()
- << " Reduction Operation: " << op
- << " axis: " << axis
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Shape: " << input->info()->tensor_shape()
+ << " Reduction Operation: " << op << " axis: " << axis << std::endl);
return func;
}
@@ -191,16 +180,11 @@ std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLa
func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Shape: " << input->info()->tensor_shape()
- << " Epsilon: " << epsilon << " "
- << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
- << " InPlace: " << is_in_place_operation(input, output)
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Shape: " << input->info()->tensor_shape() << " Epsilon: " << epsilon
+ << " " << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
+ << " InPlace: " << is_in_place_operation(input, output) << std::endl);
return func;
}
@@ -216,7 +200,8 @@ std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLa
* @return Backend batch normalization layer function
*/
template <typename FusedLayerTypes, typename TargetInfo>
-std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
+std::unique_ptr<IFunction>
+create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
@@ -246,19 +231,16 @@ std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(Fu
// Create and configure function
std::tie(func, func_name) = create_named_memory_managed_function<FType>(
- std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
+ std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta,
+ gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
+ << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type() << " Input shape: "
+ << input->info()->tensor_shape() << " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
- << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
- << std::endl);
+ << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
return func;
}
@@ -273,7 +255,9 @@ std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(Fu
* @return Backend fused depthwise convolution batch normalization layer function
*/
template <typename FusedLayerTypes, typename TargetInfo>
-std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
+std::unique_ptr<IFunction>
+create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node,
+ GraphContext &ctx)
{
validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
@@ -302,19 +286,16 @@ std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalizatio
// Create and configure function
std::tie(func, func_name) = create_named_memory_managed_function<FType>(
- std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
+ std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var,
+ beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
+ << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type() << " Input shape: "
+ << input->info()->tensor_shape() << " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
- << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
- << std::endl);
+ << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
return func;
}
@@ -343,15 +324,11 @@ std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransf
func->configure(input, output, deltas, bbox_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Shape: " << input->info()->tensor_shape()
- << " BoundingBox Info img W: " << bbox_info.img_width() << " "
- << " BoundingBox Info img H: " << bbox_info.img_height() << " "
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO(
+ "Instantiated " << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type() << " Shape: " << input->info()->tensor_shape()
+ << " BoundingBox Info img W: " << bbox_info.img_width() << " "
+ << " BoundingBox Info img H: " << bbox_info.img_height() << " " << std::endl);
return std::move(func);
}
@@ -379,14 +356,10 @@ std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode
auto func = std::make_unique<ChannelShuffleLayerFunction>();
func->configure(input, output, num_groups);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Shape: " << input->info()->tensor_shape()
- << " Num groups: " << num_groups
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Shape: " << input->info()->tensor_shape()
+ << " Num groups: " << num_groups << std::endl);
return func;
}
@@ -403,24 +376,25 @@ std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Return nullptr if depth concatenate is switched off
- if(!node.is_enabled())
+ if (!node.is_enabled())
{
return nullptr;
}
// Extract IO and info
std::vector<typename TargetInfo::SrcTensorType *> inputs;
- for(unsigned int i = 0; i < node.num_inputs(); ++i)
+ for (unsigned int i = 0; i < node.num_inputs(); ++i)
{
inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
}
- typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
- const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
- const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
+ typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
+ const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
+ const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
// Create and configure function
auto func = std::make_unique<ConcatenateLayerFunction>();
@@ -429,20 +403,14 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
// Log info
const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
std::ostringstream qss;
- if(is_quantized)
+ if (is_quantized)
{
qss << " Output QuantInfo: " << output->info()->quantization_info();
}
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << output->info()->data_type()
- << " Shape: " << output->info()->tensor_shape()
- << " Num Inputs: " << inputs.size()
- << " Axis: " << concat_axis
- << qss.str()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO(
+ "Instantiated " << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << output->info()->data_type() << " Shape: " << output->info()->tensor_shape()
+ << " Num Inputs: " << inputs.size() << " Axis: " << concat_axis << qss.str() << std::endl);
return func;
}
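
The hunk above also shows the axis handling that these log lines report: the frontend's concatenation axis is a layout-relative DataLayoutDimension, and get_dimension_idx translates it into a raw tensor-dimension index using the output's data layout, so the same graph node works for both NCHW and NHWC outputs. A minimal sketch of that translation, assuming ACL's convention that dimension 0 is the fastest-moving axis (the real helper lives in arm_compute/graph/Utils.h; the enum names and table below are an illustration only):

#include <cstddef>

// Hypothetical re-implementation for illustration; the actual helper is
// arm_compute::graph::get_dimension_idx.
enum class DataLayoutSketch { NCHW, NHWC };
enum class DataLayoutDimensionSketch { CHANNEL, HEIGHT, WIDTH, BATCHES };

inline std::size_t get_dimension_idx_sketch(DataLayoutSketch layout, DataLayoutDimensionSketch dim)
{
    // Assumption: shapes are stored fastest-moving dimension first,
    // i.e. NCHW -> [W, H, C, N] and NHWC -> [C, W, H, N].
    switch (dim)
    {
        case DataLayoutDimensionSketch::WIDTH:
            return layout == DataLayoutSketch::NHWC ? 1 : 0;
        case DataLayoutDimensionSketch::HEIGHT:
            return layout == DataLayoutSketch::NHWC ? 2 : 1;
        case DataLayoutDimensionSketch::CHANNEL:
            return layout == DataLayoutSketch::NHWC ? 0 : 2;
        default: // BATCHES
            return 3;
    }
}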
@@ -470,7 +438,7 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
- if(is_quantized)
+ if (is_quantized)
{
biases->info()->set_data_type(DataType::S32);
}
@@ -486,55 +454,50 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
std::unique_ptr<IFunction> func;
std::string func_name;
- if(conv_algorithm == ConvolutionMethod::Winograd)
+ if (conv_algorithm == ConvolutionMethod::Winograd)
{
ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
- std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
- std::string("WinogradConvolutionLayer"), mm,
- input, weights, biases, output, conv_info, fused_act, fast_math);
+ std::tie(func, func_name) =
+ create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
+ std::string("WinogradConvolutionLayer"), mm, input, weights, biases, output, conv_info, fused_act,
+ fast_math);
}
- else if(conv_algorithm == ConvolutionMethod::Direct)
+ else if (conv_algorithm == ConvolutionMethod::Direct)
{
ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
- std::string("DirectConvolutionLayer"),
- input, weights, biases, output, conv_info, fused_act);
+ std::string("DirectConvolutionLayer"), input, weights, biases, output, conv_info, fused_act);
}
- else if(conv_algorithm == ConvolutionMethod::GEMM)
+ else if (conv_algorithm == ConvolutionMethod::GEMM)
{
- std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
- std::string("GEMMConvolutionLayer"), mm,
- input, weights, biases, output, conv_info,
- WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
+ std::tie(func, func_name) =
+ create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
+ std::string("GEMMConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(),
+ Size2D(1U, 1U), fused_act, num_groups);
}
else
{
- std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
- std::string("GenericConvolutionLayer"), mm,
- input, weights, biases, output, conv_info,
- WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
+ std::tie(func, func_name) =
+ create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
+ std::string("GenericConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(),
+ Size2D(1U, 1U), fused_act, fast_math, num_groups);
}
// Log info
std::ostringstream qss;
- if(is_quantized)
+ if (is_quantized)
{
qss << " Input QuantInfo: " << input->info()->quantization_info()
<< " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Output QuantInfo: " << output->info()->quantization_info();
}
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << func_name
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Groups: " << num_groups
+ << node.name() << " Type: " << func_name << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type() << " Groups: " << num_groups
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << qss.str()
- << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
- << std::endl);
+ << " Output shape: " << output->info()->tensor_shape() << qss.str()
+ << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
return func;
}
@@ -566,19 +529,14 @@ std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &no
std::unique_ptr<IFunction> func;
std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
- std::string(), mm,
- input, weights, biases, output, deconv_info);
+ std::string(), mm, input, weights, biases, output, deconv_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Weights shape: " << weights->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -604,7 +562,7 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
- if(is_quantized)
+ if (is_quantized)
{
biases->info()->set_data_type(DataType::S32);
}
@@ -617,30 +575,25 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
std::unique_ptr<IFunction> func;
std::string func_name;
- std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
- std::string("DepthwiseConvolutionLayer"),
- input, weights, biases, output, conv_info, depth_multiplier, fused_act);
+ std::tie(func, func_name) =
+ create_named_function<DepthwiseConvolutionLayer>(std::string("DepthwiseConvolutionLayer"), input, weights,
+ biases, output, conv_info, depth_multiplier, fused_act);
// Log info
std::ostringstream qss;
- if(is_quantized)
+ if (is_quantized)
{
qss << " Input QuantInfo: " << input->info()->quantization_info()
<< " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Output QuantInfo: " << output->info()->quantization_info();
}
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << func_name
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
+ << node.name() << " Type: " << func_name << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type() << " Input shape: "
+ << input->info()->tensor_shape() << " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
- << " Depth multiplier: " << depth_multiplier
- << qss.str()
- << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
- << std::endl);
+ << " Depth multiplier: " << depth_multiplier << qss.str()
+ << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
return func;
}
@@ -670,15 +623,11 @@ std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &no
func->configure(input, output, node.block_shape());
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Block Size: " << node.block_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Block Size: " << node.block_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -709,15 +658,11 @@ std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &
func->configure(input, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Input quantization info: " << output->info()->quantization_info()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Input quantization info: " << output->info()->quantization_info()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -753,16 +698,12 @@ std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNod
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input0->info()->data_type()
- << " Input0 shape: " << input0->info()->tensor_shape()
- << " Input1 shape: " << input1->info()->tensor_shape()
+ << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input0->info()->data_type() << " Input0 shape: "
+ << input0->info()->tensor_shape() << " Input1 shape: " << input1->info()->tensor_shape()
<< " Input2 shape: " << input2->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
- << " DetectionOutputLayer info: " << detect_info
- << std::endl);
+ << " DetectionOutputLayer info: " << detect_info << std::endl);
return func;
}
@@ -805,19 +746,15 @@ std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProc
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input0->info()->data_type()
- << " Input0 shape: " << input0->info()->tensor_shape()
- << " Input1 shape: " << input1->info()->tensor_shape()
+ << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input0->info()->data_type() << " Input0 shape: "
+ << input0->info()->tensor_shape() << " Input1 shape: " << input1->info()->tensor_shape()
<< " Input2 shape: " << input2->info()->tensor_shape()
<< " Output0 shape: " << output0->info()->tensor_shape()
<< " Output1 shape: " << output1->info()->tensor_shape()
<< " Output2 shape: " << output2->info()->tensor_shape()
<< " Output3 shape: " << output3->info()->tensor_shape()
- << " DetectionPostProcessLayer info: " << detect_info
- << std::endl);
+ << " DetectionPostProcessLayer info: " << detect_info << std::endl);
return func;
}
@@ -849,35 +786,31 @@ std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
std::unique_ptr<IFunction> func = nullptr;
std::string func_name;
- if(eltwise_op == EltwiseOperation::Add)
+ if (eltwise_op == EltwiseOperation::Add)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
- std::string("ArithmeticAddition"),
- input1, input2, output, convert_policy, act_info);
+ std::string("ArithmeticAddition"), input1, input2, output, convert_policy, act_info);
}
- else if(eltwise_op == EltwiseOperation::Sub)
+ else if (eltwise_op == EltwiseOperation::Sub)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
- std::string("ArithmeticSubtraction"),
- input1, input2, output, convert_policy, act_info);
+ std::string("ArithmeticSubtraction"), input1, input2, output, convert_policy, act_info);
}
- else if(eltwise_op == EltwiseOperation::Mul)
+ else if (eltwise_op == EltwiseOperation::Mul)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
- std::string("PixelWiseMultiplication"),
- input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
+ std::string("PixelWiseMultiplication"), input1, input2, output, 1.f, convert_policy, node.rounding_policy(),
+ act_info);
}
- else if(eltwise_op == EltwiseOperation::Max)
+ else if (eltwise_op == EltwiseOperation::Max)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
- std::string("ElementwiseMaximum"),
- input1, input2, output, act_info);
+ std::string("ElementwiseMaximum"), input1, input2, output, act_info);
}
- else if(eltwise_op == EltwiseOperation::Div)
+ else if (eltwise_op == EltwiseOperation::Div)
{
std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
- std::string("ArithmeticDivision"),
- input1, input2, output, act_info);
+ std::string("ArithmeticDivision"), input1, input2, output, act_info);
}
else
{
@@ -885,14 +818,10 @@ std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
}
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Operation: " << func_name
- << " Data Type: " << input1->info()->data_type()
- << " Shape: " << input1->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType << " Operation: " << func_name
+ << " Data Type: " << input1->info()->data_type()
+ << " Shape: " << input1->info()->tensor_shape() << std::endl);
return func;
}
@@ -921,11 +850,10 @@ std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &nod
std::unique_ptr<IFunction> func = nullptr;
std::string func_name;
- if(eltwise_op == UnaryEltwiseOperation::Exp)
+ if (eltwise_op == UnaryEltwiseOperation::Exp)
{
- std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
- std::string("Exp"),
- input, output);
+ std::tie(func, func_name) =
+ create_named_function<typename UnaryEltwiseFunctions::Exp>(std::string("Exp"), input, output);
}
else
{
@@ -933,14 +861,10 @@ std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &nod
}
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Operation: " << func_name
- << " Data Type: " << input->info()->data_type()
- << " Shape: " << input->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType << " Operation: " << func_name
+ << " Data Type: " << input->info()->data_type()
+ << " Shape: " << input->info()->tensor_shape() << std::endl);
return func;
}
@@ -971,14 +895,10 @@ std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
func->configure(input, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1020,22 +940,17 @@ std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode
// Log info
std::ostringstream qss;
- if(is_quantized)
+ if (is_quantized)
{
qss << " Input QuantInfo: " << input->info()->quantization_info()
<< " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Output QuantInfo: " << output->info()->quantization_info();
}
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << qss.str()
- << " Input shape: " << input->info()->tensor_shape()
- << " Weights shape: " << weights->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << qss.str() << " Input shape: " << input->info()->tensor_shape()
+ << " Weights shape: " << weights->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1075,16 +990,14 @@ std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLaye
func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
- << " Data Type: " << scores->info()->data_type()
- << " Scores shape: " << scores->info()->tensor_shape()
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.type() << " Target " << TargetInfo::TargetType << " Data Type: "
+ << scores->info()->data_type() << " Scores shape: " << scores->info()->tensor_shape()
<< " Deltas shape: " << deltas->info()->tensor_shape()
<< " Anchors shape: " << anchors->info()->tensor_shape()
<< " Proposals shape: " << proposals->info()->tensor_shape()
<< " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
- << " Scores Out shape: " << scores_out->info()->tensor_shape()
- << std::endl);
+ << " Scores Out shape: " << scores_out->info()->tensor_shape() << std::endl);
return std::move(func);
}
@@ -1119,16 +1032,11 @@ std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node,
func->configure(input, output, axis, epsilon);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Axis: " << axis
- << " Epsilon: " << epsilon
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Axis: " << axis << " Epsilon: " << epsilon << std::endl);
return func;
}
@@ -1162,15 +1070,11 @@ std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &no
func->configure(input, output, norm_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Normalization info: " << norm_info.type()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Normalization info: " << norm_info.type() << std::endl);
return std::move(func);
}
@@ -1204,13 +1108,9 @@ std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVL
func->configure(input, output, mean, std);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Shape: " << input->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Shape: " << input->info()->tensor_shape() << std::endl);
return std::move(func);
}
@@ -1242,14 +1142,10 @@ std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
func->configure(input, output, padding, pad_value);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1280,15 +1176,11 @@ std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
func->configure(input, output, perm);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Permutation vector: " << perm
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Permutation vector: " << perm << std::endl);
return func;
}
@@ -1319,15 +1211,11 @@ std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
func->configure(input, output, pool_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Pooling info: " << pool_info.pool_type
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Pooling info: " << pool_info.pool_type << std::endl);
return func;
}
@@ -1358,14 +1246,10 @@ std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
func->configure(input, alpha, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1388,13 +1272,9 @@ std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
ARM_COMPUTE_UNUSED(input);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape() << std::endl);
return nullptr;
}
@@ -1428,15 +1308,11 @@ std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input0->info()->data_type()
- << " Input0 shape: " << input0->info()->tensor_shape()
- << " Input1 shape: " << input1->info()->tensor_shape()
+ << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input0->info()->data_type() << " Input0 shape: "
+ << input0->info()->tensor_shape() << " Input1 shape: " << input1->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
- << " PriorBoxLayer info: " << prior_info
- << std::endl);
+ << " PriorBoxLayer info: " << prior_info << std::endl);
return func;
}
@@ -1466,14 +1342,10 @@ std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node
func->configure(input, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1508,16 +1380,11 @@ std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
+ << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Operation: " << op
- << " Axis: " << axis
- << " Keep dimensions:" << keep_dims
- << std::endl);
+ << " Output shape: " << output->info()->tensor_shape() << " Operation: " << op
+ << " Axis: " << axis << " Keep dimensions:" << keep_dims << std::endl);
return func;
}
@@ -1547,14 +1414,10 @@ std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
func->configure(input, output, node.stride());
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1584,14 +1447,10 @@ std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
func->configure(input, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1619,18 +1478,15 @@ std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
// Create and configure function
auto func = std::make_unique<ResizeLayerFunction>();
- func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
+ func->configure(input, output,
+ ScaleKernelInfo{policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false});
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Interpolation: " << policy
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Interpolation: " << policy << std::endl);
return func;
}
@@ -1665,17 +1521,13 @@ std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
func->configure(input, rois, output, pool_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " ROIs shape: " << rois->info()->tensor_shape()
- << " ROIPooling width: " << pool_info.pooled_width()
- << " ROIPooling height: " << pool_info.pooled_height()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " ROIs shape: " << rois->info()->tensor_shape()
+ << " ROIPooling width: " << pool_info.pooled_width()
+ << " ROIPooling height: " << pool_info.pooled_height() << std::endl);
return std::move(func);
}
@@ -1705,14 +1557,10 @@ std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
func->configure(input, output, node.starts(), node.ends());
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1744,14 +1592,10 @@ std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphCon
func->configure(input, output, beta);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
@@ -1768,12 +1612,13 @@ std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphCon
template <typename StackLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
std::vector<typename TargetInfo::TensorType *> inputs;
- for(unsigned int i = 0; i < node.num_inputs(); ++i)
+ for (unsigned int i = 0; i < node.num_inputs(); ++i)
{
inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
}
@@ -1785,16 +1630,12 @@ std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
func->configure(inputs, axis, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << output->info()->data_type()
- << " Inputs shape: " << inputs[0]->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Num Inputs: " << inputs.size()
- << " Axis: " << axis
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << output->info()->data_type()
+ << " Inputs shape: " << inputs[0]->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " Num Inputs: " << inputs.size() << " Axis: " << axis << std::endl);
return func;
}
@@ -1829,14 +1670,10 @@ std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &nod
func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << TargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+ << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape() << std::endl);
return func;
}
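
Almost every hunk in FunctionHelpers.h above is the same mechanical change: if(...) gains a space to become if (...), and the long operator<< chains passed to ARM_COMPUTE_LOG_GRAPH_INFO and ARM_COMPUTE_LOG_GRAPH_VERBOSE are rewrapped to the new column limit. Because the whole chain is a single macro argument, clang-format treats it as one expression and breaks it at the << operators. A minimal sketch of such a stream-style macro, assuming a plain ostringstream-based definition (the library's real macros are defined in arm_compute/graph/Logger.h and route through its logger registry, so this shows only the call-site shape):

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical stand-in for ARM_COMPUTE_LOG_GRAPH_INFO; the argument is a
// single << chain, which is why clang-format reflows it as one expression.
#define SKETCH_LOG_GRAPH_INFO(x)    \
    do                              \
    {                               \
        std::ostringstream oss;     \
        oss << x;                   \
        std::cout << oss.str();     \
    } while (false)

int main()
{
    const std::string name = "conv1";
    // Usage mirrors the call sites above: one chained expression per message.
    SKETCH_LOG_GRAPH_INFO("Instantiated " << name << " Type: ConvolutionLayer"
                                          << " Groups: " << 1 << std::endl);
}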
diff --git a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
index 19c627d479..27e21cbc7e 100644
--- a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
@@ -70,15 +70,19 @@ public:
* @param[in] fused_act Activation layer information in case of a fused activation.
*
*/
- void configure(TensorType *input,
- TensorType *weights,
- TensorType *bias,
- TensorType *output,
- const TensorType *mean,
- const TensorType *var,
- const TensorType *beta,
- const TensorType *gamma,
- float epsilon, const PadStrideInfo &conv_info, unsigned int num_groups, bool fast_math, ActivationLayerInfo const &fused_act)
+ void configure(TensorType *input,
+ TensorType *weights,
+ TensorType *bias,
+ TensorType *output,
+ const TensorType *mean,
+ const TensorType *var,
+ const TensorType *beta,
+ const TensorType *gamma,
+ float epsilon,
+ const PadStrideInfo &conv_info,
+ unsigned int num_groups,
+ bool fast_math,
+ ActivationLayerInfo const &fused_act)
{
// We don't run any validate, as we assume that the layers have been already validated
const bool has_bias = (bias != nullptr);
@@ -86,7 +90,7 @@ public:
// We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
// as batch normalization might end up with a bias != 0
- if(has_bias)
+ if (has_bias)
{
_fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon);
bias_to_use = bias;
@@ -97,9 +101,10 @@ public:
bias_to_use = &_fused_bias;
}
- _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
+ _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act,
+ fast_math, num_groups);
- if(!has_bias)
+ if (!has_bias)
{
_fused_bias.allocator()->allocate();
}
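
The in-place/allocate split above follows from the batch-normalization folding algebra: per output channel c, the fused convolution uses w'_c = w_c * gamma_c / sqrt(var_c + epsilon) and b'_c = beta_c + (b_c - mean_c) * gamma_c / sqrt(var_c + epsilon), so a non-zero bias term generally appears even when the original convolution had none, which is why _fused_bias must be created and allocated in the bias-less case. A sketch of that folding under the standard formulation (the library computes the equivalent inside the fused batch-norm function configured above; the names below are stand-ins):

#include <cmath>
#include <cstddef>
#include <vector>

// Per-channel batch-norm folding: a scale for each channel's conv weights
// plus the fused bias. conv_bias may be all zeros when the conv had no bias.
void fold_bn_into_conv_sketch(std::vector<float>       &weight_scale,
                              std::vector<float>       &fused_bias,
                              const std::vector<float> &conv_bias,
                              const std::vector<float> &mean,
                              const std::vector<float> &var,
                              const std::vector<float> &beta,
                              const std::vector<float> &gamma,
                              float                     epsilon)
{
    const std::size_t channels = mean.size();
    weight_scale.resize(channels);
    fused_bias.resize(channels);
    for (std::size_t c = 0; c < channels; ++c)
    {
        const float inv_std = 1.0f / std::sqrt(var[c] + epsilon);
        weight_scale[c]     = gamma[c] * inv_std; // multiplies the channel's weights
        fused_bias[c]       = beta[c] + (conv_bias[c] - mean[c]) * weight_scale[c];
    }
}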
@@ -114,7 +119,7 @@ public:
void prepare()
{
- if(!_is_prepared)
+ if (!_is_prepared)
{
_fused_batch_norm_layer.run();
_is_prepared = true;
diff --git a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
index 4f8a8da1fb..07a2cdd8b8 100644
--- a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
@@ -67,15 +67,18 @@ public:
* @param[in] fused_act Activation layer information in case of a fused activation.
*
*/
- void configure(TensorType *input,
- TensorType *weights,
- TensorType *bias,
- TensorType *output,
- const TensorType *mean,
- const TensorType *var,
- const TensorType *beta,
- const TensorType *gamma,
- float epsilon, const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo const &fused_act)
+ void configure(TensorType *input,
+ TensorType *weights,
+ TensorType *bias,
+ TensorType *output,
+ const TensorType *mean,
+ const TensorType *var,
+ const TensorType *beta,
+ const TensorType *gamma,
+ float epsilon,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ ActivationLayerInfo const &fused_act)
{
// We don't run any validate, as we assume that the layers have been already validated
const bool has_bias = (bias != nullptr);
@@ -83,20 +86,23 @@ public:
// We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
// as batch normalization might end up with a bias != 0
- if(has_bias)
+ if (has_bias)
{
- _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+ _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon,
+ FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
bias_to_use = bias;
}
else
{
- _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+ _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon,
+ FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
bias_to_use = &_fused_bias;
}
- _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier, fused_act.enabled() ? fused_act : ActivationLayerInfo());
+ _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier,
+ fused_act.enabled() ? fused_act : ActivationLayerInfo());
- if(!has_bias)
+ if (!has_bias)
{
_fused_bias.allocator()->allocate();
}
@@ -111,7 +117,7 @@ public:
void prepare()
{
- if(!_is_prepared)
+ if (!_is_prepared)
{
_fused_batch_norm_layer.run();
_is_prepared = true;
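
Both fused wrappers use the same deferred-preparation pattern this hunk reformats: the folding work happens in prepare(), guarded by _is_prepared, so the fused batch-norm step runs exactly once before the first inference and never again on subsequent runs. Reduced to its skeleton (a sketch, not the class's full interface):

// Run-once preparation, as in the prepare() bodies above.
class FusedFunctionSketch
{
public:
    void prepare()
    {
        if (!_is_prepared)
        {
            // e.g. run the fused batch-norm step that rewrites weights/bias
            _is_prepared = true;
        }
    }

private:
    bool _is_prepared{false};
};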
diff --git a/arm_compute/graph/backends/NEON/NEDeviceBackend.h b/arm_compute/graph/backends/NEON/NEDeviceBackend.h
index 9cb37d4553..cd817a20d8 100644
--- a/arm_compute/graph/backends/NEON/NEDeviceBackend.h
+++ b/arm_compute/graph/backends/NEON/NEDeviceBackend.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_GRAPH_NEDEVICEBACKEND_H
#include "arm_compute/graph/IDeviceBackend.h"
-
#include "arm_compute/runtime/Allocator.h"
namespace arm_compute
@@ -41,16 +40,17 @@ public:
NEDeviceBackend();
// Inherited overridden methods
- void initialize_backend() override;
- void setup_backend_context(GraphContext &ctx) override;
- void release_backend_context(GraphContext &ctx) override;
+ void initialize_backend() override;
+ void setup_backend_context(GraphContext &ctx) override;
+ void release_backend_context(GraphContext &ctx) override;
bool is_backend_supported() override;
IAllocator *backend_allocator() override;
std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
- std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
- std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
- Status validate_node(INode &node) override;
- std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
+ std::unique_ptr<ITensorHandle>
+ create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
+ std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
+ Status validate_node(INode &node) override;
+ std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
std::shared_ptr<arm_compute::IWeightsManager> create_weights_manager() override;
void sync() override;
diff --git a/arm_compute/graph/backends/NEON/NESubTensorHandle.h b/arm_compute/graph/backends/NEON/NESubTensorHandle.h
index a438b65735..3619f4ed1b 100644
--- a/arm_compute/graph/backends/NEON/NESubTensorHandle.h
+++ b/arm_compute/graph/backends/NEON/NESubTensorHandle.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_GRAPH_NESUBTENSORHANDLE_H
#include "arm_compute/graph/ITensorHandle.h"
-
#include "arm_compute/runtime/SubTensor.h"
namespace arm_compute
@@ -45,7 +44,10 @@ public:
* @param[in] coords Starting coordinates
* @param[in] extend_parent Extends parent shape if true
*/
- NESubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords, bool extend_parent = false);
+ NESubTensorHandle(ITensorHandle *parent_handle,
+ const TensorShape &shape,
+ const Coordinates &coords,
+ bool extend_parent = false);
/** Destructor: free the tensor's memory */
~NESubTensorHandle() = default;
/** Allow instances of this class to be move constructed */
@@ -58,10 +60,10 @@ public:
NESubTensorHandle &operator=(const NESubTensorHandle &) = delete;
// Inherited overridden methods
- void allocate() override;
- void free() override;
- void manage(IMemoryGroup *mg) override;
- void map(bool blocking) override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
void unmap() override;
void release_if_unused() override;
arm_compute::ITensor &tensor() override;
diff --git a/arm_compute/graph/backends/NEON/NETensorHandle.h b/arm_compute/graph/backends/NEON/NETensorHandle.h
index 99101a8fe9..1df90822ba 100644
--- a/arm_compute/graph/backends/NEON/NETensorHandle.h
+++ b/arm_compute/graph/backends/NEON/NETensorHandle.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_GRAPH_NETENSORHANDLE_H
#include "arm_compute/graph/ITensorHandle.h"
-
#include "arm_compute/runtime/Tensor.h"
namespace arm_compute
@@ -51,10 +50,10 @@ public:
NETensorHandle &operator=(NETensorHandle &&) = default;
// Inherited overridden methods
- void allocate() override;
- void free() override;
- void manage(IMemoryGroup *mg) override;
- void map(bool blocking) override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
void unmap() override;
void release_if_unused() override;
arm_compute::ITensor &tensor() override;
diff --git a/arm_compute/graph/backends/Utils.h b/arm_compute/graph/backends/Utils.h
index 774ce515b5..5f4e66c207 100644
--- a/arm_compute/graph/backends/Utils.h
+++ b/arm_compute/graph/backends/Utils.h
@@ -42,7 +42,8 @@ namespace backends
* @return A configured backend function
*/
template <typename FunctionType, typename FunctionNameType, typename... ParameterType>
-std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_function(FunctionNameType name, ParameterType... args)
+std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_function(FunctionNameType name,
+ ParameterType... args)
{
auto f = std::make_unique<FunctionType>();
f->configure(std::forward<ParameterType>(args)...);
@@ -58,9 +59,8 @@ std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_nam
* @return A configured backend function
*/
template <typename FunctionType, typename FunctionNameType, typename MemoryManagerType, typename... ParameterType>
-std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_memory_managed_function(FunctionNameType name,
- MemoryManagerType mm,
- ParameterType... args)
+std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType>
+create_named_memory_managed_function(FunctionNameType name, MemoryManagerType mm, ParameterType... args)
{
auto f = std::make_unique<FunctionType>(mm);
f->configure(std::forward<ParameterType>(args)...);
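
Both factory templates in Utils.h follow the pattern visible above: construct the backend function (optionally passing it a memory manager), forward the remaining arguments to configure(), and return the function together with the display name that the log statements throughout FunctionHelpers.h print. A self-contained sketch of a caller, with SomeBackendFunction standing in for a concrete backend type (hypothetical names, illustration only):

#include <memory>
#include <string>
#include <tuple>
#include <utility>

struct IFunctionSketch
{
    virtual ~IFunctionSketch() = default;
};

// Stand-in backend function exposing the configure() shape the factory expects.
struct SomeBackendFunction : IFunctionSketch
{
    void configure(int input_id, int output_id) { (void)input_id; (void)output_id; }
};

template <typename FunctionType, typename... Args>
std::tuple<std::unique_ptr<IFunctionSketch>, std::string> create_named_function_sketch(std::string name, Args... args)
{
    auto f = std::make_unique<FunctionType>();
    f->configure(std::forward<Args>(args)...);
    return std::make_tuple(std::move(f), name);
}

int main()
{
    std::unique_ptr<IFunctionSketch> func;
    std::string                      func_name;
    std::tie(func, func_name) =
        create_named_function_sketch<SomeBackendFunction>(std::string("SomeBackendFunction"), 0, 1);
}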
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 71a6201554..0e102942a7 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -24,14 +24,13 @@
#ifndef ACL_ARM_COMPUTE_GRAPH_BACKENDS_VALIDATEHELPERS_H
#define ACL_ARM_COMPUTE_GRAPH_BACKENDS_VALIDATEHELPERS_H
-#include "arm_compute/graph/Logger.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/graph/nodes/Nodes.h"
-
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/nodes/Nodes.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/Types.h"
namespace arm_compute
{
@@ -63,7 +62,8 @@ inline arm_compute::ITensorInfo *get_backing_tensor_info(arm_compute::graph::Ten
template <typename ArgMinMaxLayer>
Status validate_arg_min_max_layer(ArgMinMaxLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -86,7 +86,8 @@ Status validate_arg_min_max_layer(ArgMinMaxLayerNode &node)
template <typename BoundingBoxTransformLayer>
Status validate_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: "
+ << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -110,7 +111,8 @@ Status validate_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node
template <typename ChannelShuffleLayer>
Status validate_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -133,10 +135,14 @@ Status validate_channel_shuffle_layer(ChannelShuffleLayerNode &node)
*
* @return Status
*/
-template <typename ConvolutionLayer, typename DirectConvolutionLayer, typename GEMMConvolutionLayer, typename WinogradConvolutionLayer>
+template <typename ConvolutionLayer,
+ typename DirectConvolutionLayer,
+ typename GEMMConvolutionLayer,
+ typename WinogradConvolutionLayer>
Status validate_convolution_layer(ConvolutionLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -146,7 +152,7 @@ Status validate_convolution_layer(ConvolutionLayerNode &node)
arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
- if(is_data_type_quantized_asymmetric(input->data_type()))
+ if (is_data_type_quantized_asymmetric(input->data_type()))
{
biases->set_data_type(DataType::S32);
}
@@ -158,23 +164,24 @@ Status validate_convolution_layer(ConvolutionLayerNode &node)
// Validate function
Status status{};
- switch(conv_algorithm)
+ switch (conv_algorithm)
{
case ConvolutionMethod::Direct:
ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
break;
case ConvolutionMethod::GEMM:
- status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
- WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
+ status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, WeightsInfo(),
+ Size2D(1, 1), ActivationLayerInfo(), num_groups);
break;
case ConvolutionMethod::Winograd:
ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
- status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
+ status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info,
+ ActivationLayerInfo(), fast_math);
break;
case ConvolutionMethod::Default:
- status = ConvolutionLayer::validate(input, weights, biases, output, conv_info,
- WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), fast_math, num_groups);
+ status = ConvolutionLayer::validate(input, weights, biases, output, conv_info, WeightsInfo(), Size2D(1, 1),
+ ActivationLayerInfo(), fast_math, num_groups);
break;
default:
ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported convolution method");
@@ -194,7 +201,8 @@ Status validate_convolution_layer(ConvolutionLayerNode &node)
template <typename DepthwiseConvolutionLayer>
Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: "
+ << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -210,7 +218,7 @@ Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
// Validate function
Status status{};
- switch(dwc_algorithm)
+ switch (dwc_algorithm)
{
case DepthwiseConvolutionMethod::Default:
case DepthwiseConvolutionMethod::Optimized3x3:
@@ -233,7 +241,8 @@ Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
template <typename DepthToSpaceLayer>
Status validate_depth_to_space_layer(DepthToSpaceLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -254,7 +263,8 @@ Status validate_depth_to_space_layer(DepthToSpaceLayerNode &node)
template <typename DequantizationLayer>
Status validate_dequantization_layer(DequantizationLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -275,7 +285,8 @@ Status validate_dequantization_layer(DequantizationLayerNode &node)
template <typename DetectionOutputLayer>
Status validate_detection_output_layer(DetectionOutputLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -299,7 +310,8 @@ Status validate_detection_output_layer(DetectionOutputLayerNode &node)
template <typename DetectionPostProcessLayer>
Status validate_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: "
+ << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 4);
@@ -327,7 +339,8 @@ Status validate_detection_post_process_layer(DetectionPostProcessLayerNode &node
template <typename GenerateProposalsLayer>
Status validate_generate_proposals_layer(GenerateProposalsLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 3);
@@ -354,7 +367,8 @@ Status validate_generate_proposals_layer(GenerateProposalsLayerNode &node)
template <typename L2NormalizeLayer>
Status validate_l2_normalize_layer(L2NormalizeLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -379,7 +393,8 @@ Status validate_l2_normalize_layer(L2NormalizeLayerNode &node)
template <typename NormalizePlanarYUVLayer>
Status validate_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -404,7 +419,8 @@ Status validate_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
template <typename PadLayer>
Status validate_pad_layer(PadLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -427,14 +443,15 @@ Status validate_pad_layer(PadLayerNode &node)
template <typename PermuteLayer>
Status validate_permute_layer(PermuteLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
// Extract IO and info
arm_compute::ITensorInfo *input = get_backing_tensor_info(node.input(0));
arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
- const PermutationVector &perm = node.permutation_vector();
+ const PermutationVector &perm = node.permutation_vector();
return PermuteLayer::validate(input, output, perm);
}
@@ -450,7 +467,8 @@ Status validate_permute_layer(PermuteLayerNode &node)
template <typename PReluLayer>
Status validate_prelu_layer(PReluLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -473,7 +491,8 @@ Status validate_prelu_layer(PReluLayerNode &node)
template <typename PriorBoxLayer>
Status validate_priorbox_layer(PriorBoxLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -497,7 +516,8 @@ Status validate_priorbox_layer(PriorBoxLayerNode &node)
template <typename QuantizationLayer>
Status validate_quantization_layer(QuantizationLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -520,7 +540,8 @@ Status validate_quantization_layer(QuantizationLayerNode &node)
template <typename ReductionLayer>
Status validate_reduction_operation_layer(ReductionLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -544,7 +565,8 @@ Status validate_reduction_operation_layer(ReductionLayerNode &node)
template <typename ReorgLayer>
Status validate_reorg_layer(ReorgLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -567,7 +589,8 @@ Status validate_reorg_layer(ReorgLayerNode &node)
template <typename ReshapeLayer>
Status validate_reshape_layer(ReshapeLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -590,14 +613,15 @@ Status validate_reshape_layer(ReshapeLayerNode &node)
template <typename ROIAlignLayer>
Status validate_roi_align_layer(ROIAlignLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+ "Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
// Extract input and output
- arm_compute::ITensorInfo *input = detail::get_backing_tensor_info(node.input(0));
- arm_compute::ITensorInfo *rois = detail::get_backing_tensor_info(node.input(1));
- arm_compute::ITensorInfo *output = detail::get_backing_tensor_info(node.output(0));
+ arm_compute::ITensorInfo *input = detail::get_backing_tensor_info(node.input(0));
+ arm_compute::ITensorInfo *rois = detail::get_backing_tensor_info(node.input(1));
+ arm_compute::ITensorInfo *output = detail::get_backing_tensor_info(node.output(0));
const ROIPoolingLayerInfo &pool_info = node.pooling_info();
// Validate function
@@ -615,7 +639,8 @@ Status validate_roi_align_layer(ROIAlignLayerNode &node)
template <typename SliceLayer>
Status validate_slice_layer(SliceLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -639,7 +664,8 @@ Status validate_slice_layer(SliceLayerNode &node)
template <typename StridedSliceLayer>
Status validate_strided_slice_layer(StridedSliceLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -651,7 +677,8 @@ Status validate_strided_slice_layer(StridedSliceLayerNode &node)
const BiStrides strides = node.strides();
const StridedSliceLayerInfo info = node.strided_slice_info();
- return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
+ return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(),
+ info.shrink_axis_mask());
}
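
The three mask arguments above pack per-dimension behaviour into bitfields: bit i of begin_mask or end_mask tells the kernel to ignore starts[i] or ends[i] and take the dimension's full extent, while bit i of shrink_axis_mask collapses dimension i to a single element. A minimal sketch of building the info object consumed here, assuming only the public StridedSliceLayerInfo constructor from arm_compute/core/Types.h:

#include "arm_compute/core/Types.h"
#include <cstdint>

using namespace arm_compute;

// Sketch: begin_mask bit 0 ignores starts[0] and slices dimension 0 from
// its start; shrink_axis_mask bit 2 removes dimension 2 from the output.
StridedSliceLayerInfo make_slice_info()
{
    const int32_t begin_mask       = 1 << 0;
    const int32_t end_mask         = 0;
    const int32_t shrink_axis_mask = 1 << 2;
    return StridedSliceLayerInfo(begin_mask, end_mask, shrink_axis_mask);
}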
/** Validates an element-wise layer node
@@ -663,7 +690,8 @@ Status validate_strided_slice_layer(StridedSliceLayerNode &node)
template <typename EltwiseLayerFunctions>
Status validate_eltwise_Layer(EltwiseLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -678,23 +706,24 @@ Status validate_eltwise_Layer(EltwiseLayerNode &node)
const QuantizationInfo quant_info = node.output_quant_info();
// Validate function
- if(eltwise_op == EltwiseOperation::Add)
+ if (eltwise_op == EltwiseOperation::Add)
{
return EltwiseLayerFunctions::ArithmeticAddition::validate(input1, input2, output, convert_policy, act_info);
}
- else if(eltwise_op == EltwiseOperation::Sub)
+ else if (eltwise_op == EltwiseOperation::Sub)
{
return EltwiseLayerFunctions::ArithmeticSubtraction::validate(input1, input2, output, convert_policy, act_info);
}
- else if(eltwise_op == EltwiseOperation::Mul)
+ else if (eltwise_op == EltwiseOperation::Mul)
{
- return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy, round_policy, act_info);
+ return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy,
+ round_policy, act_info);
}
- else if(eltwise_op == EltwiseOperation::Max)
+ else if (eltwise_op == EltwiseOperation::Max)
{
return EltwiseLayerFunctions::ElementwiseMax::validate(input1, input2, output, act_info);
}
- else if(eltwise_op == EltwiseOperation::Div)
+ else if (eltwise_op == EltwiseOperation::Div)
{
return EltwiseLayerFunctions::ArithmeticDivision::validate(input1, input2, output, act_info);
}
@@ -713,7 +742,8 @@ Status validate_eltwise_Layer(EltwiseLayerNode &node)
template <typename UnaryEltwiseLayerFunctions>
Status validate_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name()
+ << std::endl);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -723,7 +753,7 @@ Status validate_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
// Validate function
- if(eltwise_op == UnaryEltwiseOperation::Exp)
+ if (eltwise_op == UnaryEltwiseOperation::Exp)
{
return UnaryEltwiseLayerFunctions::ExpLayer::validate(input, output);
}
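
Each device backend instantiates these validate helpers with its own function set; the backend's node validator is essentially a switch over node types that forwards to the matching specialization. A sketch of that dispatch for the convolution case, assuming the OpenCL runtime functions named below are the set the backend wires up:

#include "arm_compute/graph/backends/ValidateHelpers.h"
#include "arm_compute/runtime/CL/CLFunctions.h"

using namespace arm_compute;
using namespace arm_compute::graph;
using namespace arm_compute::graph::backends;

// Sketch: forward a convolution node to the templated helper. The four
// CL types are assumptions standing in for whatever function set the
// backend actually provides.
Status validate_cl_convolution_node(ConvolutionLayerNode &node)
{
    return detail::validate_convolution_layer<CLConvolutionLayer, CLDirectConvolutionLayer,
                                              CLGEMMConvolutionLayer, CLWinogradConvolutionLayer>(node);
}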
diff --git a/arm_compute/graph/frontend/IStream.h b/arm_compute/graph/frontend/IStream.h
index f69d5437c1..1831ac0be3 100644
--- a/arm_compute/graph/frontend/IStream.h
+++ b/arm_compute/graph/frontend/IStream.h
@@ -84,8 +84,8 @@ public:
}
protected:
- StreamHints _hints = {}; /**< Execution and algorithmic hints */
- NodeID _tail_node = { EmptyNodeID }; /**< NodeID pointing to the last(tail) node of the graph */
+ StreamHints _hints = {}; /**< Execution and algorithmic hints */
+ NodeID _tail_node = {EmptyNodeID}; /**< NodeID pointing to the last(tail) node of the graph */
};
} // namespace frontend
} // namespace graph
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index fe0539bac5..bd321e6f1a 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -24,13 +24,12 @@
#ifndef ARM_COMPUTE_GRAPH_LAYERS_H
#define ARM_COMPUTE_GRAPH_LAYERS_H
-#include "arm_compute/graph/GraphBuilder.h"
-#include "arm_compute/graph/Types.h"
+#include "arm_compute/core/utils/misc/Utility.h"
#include "arm_compute/graph/frontend/ILayer.h"
#include "arm_compute/graph/frontend/IStream.h"
#include "arm_compute/graph/frontend/SubStream.h"
-
-#include "arm_compute/core/utils/misc/Utility.h"
+#include "arm_compute/graph/GraphBuilder.h"
+#include "arm_compute/graph/Types.h"
#include <memory>
#include <string>
@@ -50,14 +49,13 @@ public:
* @param[in] desc Description of input tensor.
* @param[in] accessor Accessor to get input tensor data from.
*/
- InputLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor)
- : _desc(desc), _accessor(std::move(accessor))
+ InputLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor) : _desc(desc), _accessor(std::move(accessor))
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
+ NodeParams common_params = {name(), s.hints().target_hint};
return GraphBuilder::add_input_node(s.graph(), common_params, _desc, std::move(_accessor));
}
@@ -75,14 +73,13 @@ public:
* @param[in] desc Description of input tensor.
* @param[in] accessor Accessor to get input tensor data from.
*/
- ConstantLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor)
- : _desc(desc), _accessor(std::move(accessor))
+ ConstantLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor) : _desc(desc), _accessor(std::move(accessor))
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
+ NodeParams common_params = {name(), s.hints().target_hint};
return GraphBuilder::add_const_node(s.graph(), common_params, _desc, std::move(_accessor));
}
@@ -107,8 +104,8 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), _connection_idx };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), _connection_idx};
return GraphBuilder::add_output_node(s.graph(), common_params, input, std::move(_accessor));
}
@@ -126,18 +123,17 @@ public:
* @param[in] act_info Activation information
* @param[in] out_quant_info (Optional) Output quantization info
*/
- ActivationLayer(ActivationLayerInfo act_info,
- const QuantizationInfo out_quant_info = QuantizationInfo())
- : _act_info(act_info),
- _out_quant_info(std::move(out_quant_info))
+ ActivationLayer(ActivationLayerInfo act_info, const QuantizationInfo out_quant_info = QuantizationInfo())
+ : _act_info(act_info), _out_quant_info(std::move(out_quant_info))
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
- return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info, std::move(_out_quant_info));
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
+ return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info,
+ std::move(_out_quant_info));
}
private:
@@ -160,10 +156,7 @@ public:
unsigned int axis,
DataType out_data_type = DataType::UNKNOWN,
const QuantizationInfo out_quant_info = QuantizationInfo())
- : _op(op),
- _axis(axis),
- _out_data_type(out_data_type),
- _out_quant_info(std::move(out_quant_info))
+ : _op(op), _axis(axis), _out_data_type(out_data_type), _out_quant_info(std::move(out_quant_info))
{
}
@@ -175,9 +168,10 @@ public:
*/
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
- return GraphBuilder::add_arg_min_max_node(s.graph(), common_params, input, _op, _axis, _out_data_type, std::move(_out_quant_info));
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
+ return GraphBuilder::add_arg_min_max_node(s.graph(), common_params, input, _op, _axis, _out_data_type,
+ std::move(_out_quant_info));
}
private:
@@ -204,7 +198,11 @@ public:
ITensorAccessorUPtr gamma = nullptr,
ITensorAccessorUPtr beta = nullptr,
float epsilon = 0.001f)
- : _mean(std::move(mean)), _var(std::move(var)), _gamma(std::move(gamma)), _beta(std::move(beta)), _epsilon(epsilon)
+ : _mean(std::move(mean)),
+ _var(std::move(var)),
+ _gamma(std::move(gamma)),
+ _beta(std::move(beta)),
+ _epsilon(epsilon)
{
}
@@ -213,10 +211,10 @@ public:
ARM_COMPUTE_ERROR_ON(_mean == nullptr);
ARM_COMPUTE_ERROR_ON(_var == nullptr);
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
- return GraphBuilder::add_batch_normalization_node(s.graph(), common_params, input, _epsilon,
- std::move(_mean), std::move(_var), std::move(_beta), std::move(_gamma));
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
+ return GraphBuilder::add_batch_normalization_node(s.graph(), common_params, input, _epsilon, std::move(_mean),
+ std::move(_var), std::move(_beta), std::move(_gamma));
}
private:
@@ -237,7 +235,9 @@ public:
* @param[in] sub_stream_deltas Graph sub-stream for the deltas
* @param[in] info Contains BoundingBox operation information described in @ref BoundingBoxTransformInfo.
*/
- BoundingBoxTransformLayer(SubStream &&sub_stream_input, SubStream &&sub_stream_deltas, BoundingBoxTransformInfo info)
+ BoundingBoxTransformLayer(SubStream &&sub_stream_input,
+ SubStream &&sub_stream_deltas,
+ BoundingBoxTransformInfo info)
: _ss_input(sub_stream_input), _ss_deltas(sub_stream_deltas), _bbox_info(info)
{
}
@@ -250,9 +250,9 @@ public:
*/
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { _ss_input.tail_node(), 0 };
- NodeIdxPair deltas = { _ss_deltas.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {_ss_input.tail_node(), 0};
+ NodeIdxPair deltas = {_ss_deltas.tail_node(), 0};
return GraphBuilder::add_bounding_box_transform_node(s.graph(), common_params, input, deltas, _bbox_info);
}
@@ -270,15 +270,14 @@ public:
*
* @param[in] num_groups Number of groups
*/
- ChannelShuffleLayer(unsigned int num_groups)
- : _num_groups(num_groups)
+ ChannelShuffleLayer(unsigned int num_groups) : _num_groups(num_groups)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_channel_shuffle_node(s.graph(), common_params, input, _num_groups);
}
@@ -297,17 +296,15 @@ public:
* @param[in] rest_sub_streams Rest sub-graph branches
*/
template <typename... Ts>
- ConcatLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+ ConcatLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&...rest_sub_streams)
: _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
{
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
- utility::for_each([&](SubStream && sub_stream)
- {
- _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
- },
- std::move(rest_sub_streams)...);
+ utility::for_each([&](SubStream &&sub_stream)
+ { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+ std::move(rest_sub_streams)...);
}
/** Construct a concatenation layer
*
@@ -317,33 +314,33 @@ public:
* @param[in] rest_sub_streams Rest sub-graph branches
*/
template <typename... Ts>
- ConcatLayer(descriptors::ConcatLayerDescriptor concat_descriptor, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+ ConcatLayer(descriptors::ConcatLayerDescriptor concat_descriptor,
+ SubStream &&sub_stream1,
+ SubStream &&sub_stream2,
+ Ts &&...rest_sub_streams)
: _sub_streams(), _concat_descriptor(concat_descriptor)
{
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
- utility::for_each([&](SubStream && sub_stream)
- {
- _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
- },
- std::move(rest_sub_streams)...);
+ utility::for_each([&](SubStream &&sub_stream)
+ { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+ std::move(rest_sub_streams)...);
}
/** Construct a concat layer
*
* @param[in] sub_stream Sub-stream
*/
template <typename... Ts>
- ConcatLayer(SubStream &&sub_stream)
- : _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
+ ConcatLayer(SubStream &&sub_stream) : _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
{
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
}
NodeID create_layer(IStream &s) override
{
NodeID nid = EmptyNodeID;
- NodeParams common_params = { name(), s.hints().target_hint };
- if(_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
+ NodeParams common_params = {name(), s.hints().target_hint};
+ if (_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
{
nid = _sub_streams[0]->tail_node();
}
@@ -351,14 +348,14 @@ public:
{
// Collect tail nodes and concatenate
std::vector<NodeIdxPair> nodes;
- for(auto &ss : _sub_streams)
+ for (auto &ss : _sub_streams)
{
- if(ss && (ss->tail_node() != EmptyNodeID))
+ if (ss && (ss->tail_node() != EmptyNodeID))
{
const auto tail_node = s.graph().node(ss->tail_node());
- if(tail_node != nullptr && tail_node->type() != NodeType::Output)
+ if (tail_node != nullptr && tail_node->type() != NodeType::Output)
{
- nodes.push_back({ ss->tail_node(), 0 });
+ nodes.push_back({ss->tail_node(), 0});
}
}
}
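
The variadic ConcatLayer constructors fold every extra branch into _sub_streams by move, using utility::for_each over the parameter pack. A self-contained sketch of the same fold, with a stand-in for_each_arg and a stub sub-stream type so the pattern compiles in isolation:

#include <memory>
#include <utility>
#include <vector>

// Stand-ins so the sketch compiles on its own; in the library the fold
// uses arm_compute::utility::for_each over real SubStream objects.
struct SubStreamStub
{
    int tail{0};
};

template <typename F>
void for_each_arg(F &&)
{
}
template <typename F, typename T, typename... Ts>
void for_each_arg(F &&func, T &&arg, Ts &&...rest)
{
    func(std::forward<T>(arg));
    for_each_arg(std::forward<F>(func), std::forward<Ts>(rest)...);
}

// Sketch of the ConcatLayer/StackLayer constructor body: the first two
// branches are pushed explicitly, then the parameter pack is folded in order.
template <typename... Ts>
std::vector<std::unique_ptr<SubStreamStub>> collect(SubStreamStub &&s1, SubStreamStub &&s2, Ts &&...rest)
{
    std::vector<std::unique_ptr<SubStreamStub>> streams;
    streams.push_back(std::make_unique<SubStreamStub>(std::move(s1)));
    streams.push_back(std::make_unique<SubStreamStub>(std::move(s2)));
    for_each_arg([&](SubStreamStub &&ss)
                 { streams.push_back(std::make_unique<SubStreamStub>(std::move(ss))); },
                 std::move(rest)...);
    return streams;
}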
@@ -411,12 +408,12 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeIdxPair input = { s.tail_node(), 0 };
- NodeParams common_params = { name(), s.hints().target_hint };
- return GraphBuilder::add_convolution_node(s.graph(), common_params, input,
- Size2D(_conv_width, _conv_height), _ofm, _conv_info, _num_groups,
- s.hints().convolution_method_hint, s.hints().fast_math_hint,
- std::move(_weights), std::move(_bias), std::move(_weights_quant_info), std::move(_out_quant_info));
+ NodeIdxPair input = {s.tail_node(), 0};
+ NodeParams common_params = {name(), s.hints().target_hint};
+ return GraphBuilder::add_convolution_node(s.graph(), common_params, input, Size2D(_conv_width, _conv_height),
+ _ofm, _conv_info, _num_groups, s.hints().convolution_method_hint,
+ s.hints().fast_math_hint, std::move(_weights), std::move(_bias),
+ std::move(_weights_quant_info), std::move(_out_quant_info));
}
private:
@@ -461,11 +458,10 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeIdxPair input = { s.tail_node(), 0 };
- NodeParams common_params = { name(), s.hints().target_hint };
- return GraphBuilder::add_deconvolution_node(s.graph(), common_params, input,
- Size2D(_conv_width, _conv_height), _ofm, _deconv_info,
- std::move(_weights), std::move(_bias));
+ NodeIdxPair input = {s.tail_node(), 0};
+ NodeParams common_params = {name(), s.hints().target_hint};
+ return GraphBuilder::add_deconvolution_node(s.graph(), common_params, input, Size2D(_conv_width, _conv_height),
+ _ofm, _deconv_info, std::move(_weights), std::move(_bias));
}
private:
@@ -513,12 +509,12 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeIdxPair input = { s.tail_node(), 0 };
- NodeParams common_params = { name(), s.hints().target_hint };
- return GraphBuilder::add_depthwise_convolution_node(s.graph(), common_params,
- input, Size2D(_conv_width, _conv_height), _conv_info, _depth_multiplier,
- s.hints().depthwise_convolution_method_hint,
- std::move(_weights), std::move(_bias), std::move(_weights_quant_info), std::move(_out_quant_info));
+ NodeIdxPair input = {s.tail_node(), 0};
+ NodeParams common_params = {name(), s.hints().target_hint};
+ return GraphBuilder::add_depthwise_convolution_node(
+ s.graph(), common_params, input, Size2D(_conv_width, _conv_height), _conv_info, _depth_multiplier,
+ s.hints().depthwise_convolution_method_hint, std::move(_weights), std::move(_bias),
+ std::move(_weights_quant_info), std::move(_out_quant_info));
}
private:
@@ -540,15 +536,14 @@ public:
*
* @param[in] block_shape Block size to be rearranged
*/
- DepthToSpaceLayer(int32_t block_shape)
- : _block_shape(block_shape)
+ DepthToSpaceLayer(int32_t block_shape) : _block_shape(block_shape)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_depth_to_space_node(s.graph(), common_params, input, _block_shape);
}
@@ -569,8 +564,8 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_dequantization_node(s.graph(), common_params, input);
}
};
@@ -585,18 +580,21 @@ public:
* @param[in] sub_stream_prior PriorBox graph sub-stream.
* @param[in] detect_info DetectionOutput parameters.
*/
- DetectionOutputLayer(SubStream &&sub_stream_conf, SubStream &&sub_stream_prior, const DetectionOutputLayerInfo &detect_info)
+ DetectionOutputLayer(SubStream &&sub_stream_conf,
+ SubStream &&sub_stream_prior,
+ const DetectionOutputLayerInfo &detect_info)
: _ss_conf(std::move(sub_stream_conf)), _ss_prior(std::move(sub_stream_prior)), _detect_info(detect_info)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input_loc = { s.tail_node(), 0 };
- NodeIdxPair input_conf = { _ss_conf.tail_node(), 0 };
- NodeIdxPair input_priorbox = { _ss_prior.tail_node(), 0 };
- return GraphBuilder::add_detection_output_node(s.graph(), common_params, input_loc, input_conf, input_priorbox, _detect_info);
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input_loc = {s.tail_node(), 0};
+ NodeIdxPair input_conf = {_ss_conf.tail_node(), 0};
+ NodeIdxPair input_priorbox = {_ss_prior.tail_node(), 0};
+ return GraphBuilder::add_detection_output_node(s.graph(), common_params, input_loc, input_conf, input_priorbox,
+ _detect_info);
}
private:
@@ -615,9 +613,14 @@ public:
* @param[in] anchors Accessor to get anchors tensor data from.
* @param[in] out_quant_info (Optional) Output quantization info
*/
- DetectionPostProcessLayer(SubStream &&sub_stream_class_prediction, DetectionPostProcessLayerInfo detect_info, ITensorAccessorUPtr anchors,
- const QuantizationInfo out_quant_info = QuantizationInfo())
- : _sub_stream_class_prediction(std::move(sub_stream_class_prediction)), _detect_info(detect_info), _anchors(std::move(anchors)), _out_quant_info(std::move(out_quant_info))
+ DetectionPostProcessLayer(SubStream &&sub_stream_class_prediction,
+ DetectionPostProcessLayerInfo detect_info,
+ ITensorAccessorUPtr anchors,
+ const QuantizationInfo out_quant_info = QuantizationInfo())
+ : _sub_stream_class_prediction(std::move(sub_stream_class_prediction)),
+ _detect_info(detect_info),
+ _anchors(std::move(anchors)),
+ _out_quant_info(std::move(out_quant_info))
{
}
@@ -625,10 +628,12 @@ public:
{
ARM_COMPUTE_ERROR_ON(_anchors == nullptr);
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input_box_encoding = { s.tail_node(), 0 };
- NodeIdxPair input_class_prediction = { _sub_stream_class_prediction.tail_node(), 0 };
- return GraphBuilder::add_detection_post_process_node(s.graph(), common_params, input_box_encoding, input_class_prediction, _detect_info, std::move(_anchors), std::move(_out_quant_info));
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input_box_encoding = {s.tail_node(), 0};
+ NodeIdxPair input_class_prediction = {_sub_stream_class_prediction.tail_node(), 0};
+ return GraphBuilder::add_detection_post_process_node(s.graph(), common_params, input_box_encoding,
+ input_class_prediction, _detect_info, std::move(_anchors),
+ std::move(_out_quant_info));
}
private:
@@ -645,15 +650,14 @@ public:
*
* @param[in] shape Output shape
*/
- DummyLayer(TensorShape shape)
- : _shape(shape)
+ DummyLayer(TensorShape shape) : _shape(shape)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_dummy_node(s.graph(), common_params, input, _shape);
}
@@ -677,9 +681,9 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input0 = { _ss0.tail_node(), 0 };
- NodeIdxPair input1 = { _ss1.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input0 = {_ss0.tail_node(), 0};
+ NodeIdxPair input1 = {_ss1.tail_node(), 0};
return GraphBuilder::add_elementwise_node(s.graph(), common_params, input0, input1, _op);
}
@@ -700,8 +704,8 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_flatten_node(s.graph(), common_params, input);
}
};
@@ -770,13 +774,13 @@ public:
*/
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
- if(_weights != nullptr)
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
+ if (_weights != nullptr)
{
- return GraphBuilder::add_fully_connected_layer(s.graph(), common_params, input, _num_outputs,
- std::move(_weights), std::move(_bias), _fc_info,
- std::move(_weights_quant_info), std::move(_out_quant_info), s.hints().fast_math_hint);
+ return GraphBuilder::add_fully_connected_layer(
+ s.graph(), common_params, input, _num_outputs, std::move(_weights), std::move(_bias), _fc_info,
+ std::move(_weights_quant_info), std::move(_out_quant_info), s.hints().fast_math_hint);
}
else
{
@@ -811,8 +815,14 @@ public:
* @param[in] ss_anchors Graph sub-stream for the anchors.
* @param[in] info Generate Proposals operation information.
*/
- GenerateProposalsLayer(SubStream &&ss_scores, SubStream &&ss_deltas, SubStream &&ss_anchors, GenerateProposalsInfo info)
- : _ss_scores(std::move(ss_scores)), _ss_deltas(std::move(ss_deltas)), _ss_anchors(std::move(ss_anchors)), _info(info)
+ GenerateProposalsLayer(SubStream &&ss_scores,
+ SubStream &&ss_deltas,
+ SubStream &&ss_anchors,
+ GenerateProposalsInfo info)
+ : _ss_scores(std::move(ss_scores)),
+ _ss_deltas(std::move(ss_deltas)),
+ _ss_anchors(std::move(ss_anchors)),
+ _info(info)
{
}
@@ -824,10 +834,10 @@ public:
*/
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair scores = { _ss_scores.tail_node(), 0 };
- NodeIdxPair deltas = { _ss_deltas.tail_node(), 0 };
- NodeIdxPair anchors = { _ss_anchors.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair scores = {_ss_scores.tail_node(), 0};
+ NodeIdxPair deltas = {_ss_deltas.tail_node(), 0};
+ NodeIdxPair anchors = {_ss_anchors.tail_node(), 0};
return GraphBuilder::add_generate_proposals_node(s.graph(), common_params, scores, deltas, anchors, _info);
}
@@ -847,15 +857,14 @@ public:
* @param[in] axis Axis to perform normalization on
* @param[in] epsilon Lower bound value for the normalization
*/
- L2NormalizeLayer(int axis, float epsilon)
- : _axis(axis), _epsilon(epsilon)
+ L2NormalizeLayer(int axis, float epsilon) : _axis(axis), _epsilon(epsilon)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_l2_normalize_node(s.graph(), common_params, input, _axis, _epsilon);
}
@@ -872,15 +881,14 @@ public:
*
* @param[in] norm_info Normalization information.
*/
- NormalizationLayer(NormalizationLayerInfo norm_info)
- : _norm_info(norm_info)
+ NormalizationLayer(NormalizationLayerInfo norm_info) : _norm_info(norm_info)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_normalization_node(s.graph(), common_params, input, _norm_info);
}
@@ -897,8 +905,7 @@ public:
* @param[in] mean Accessor to get mean tensor data from.
* @param[in] std Accessor to get std tensor data from.
*/
- NormalizePlanarYUVLayer(ITensorAccessorUPtr mean,
- ITensorAccessorUPtr std)
+ NormalizePlanarYUVLayer(ITensorAccessorUPtr mean, ITensorAccessorUPtr std)
: _mean(std::move(mean)), _std(std::move(std))
{
}
@@ -908,10 +915,10 @@ public:
ARM_COMPUTE_ERROR_ON(_mean == nullptr);
ARM_COMPUTE_ERROR_ON(_std == nullptr);
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
- return GraphBuilder::add_normalize_planar_yuv_node(s.graph(), common_params, input,
- std::move(_mean), std::move(_std));
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
+ return GraphBuilder::add_normalize_planar_yuv_node(s.graph(), common_params, input, std::move(_mean),
+ std::move(_std));
}
private:
@@ -929,15 +936,14 @@ public:
* specifies the front and the end padding in the i-th dimension.
* @param[in] pad_value Padding value to use. Defaults to 0.
*/
- PadLayer(PaddingList padding, PixelValue pad_value = PixelValue())
- : _padding(padding), _pad_value(pad_value)
+ PadLayer(PaddingList padding, PixelValue pad_value = PixelValue()) : _padding(padding), _pad_value(pad_value)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_pad_node(s.graph(), common_params, input, _padding, _pad_value);
}
@@ -956,15 +962,14 @@ public:
* @param[in] layout (Optional) Data layout to assign to permuted tensor.
* If UNKNOWN then the input's layout will be used.
*/
- PermuteLayer(PermutationVector perm, DataLayout layout = DataLayout::UNKNOWN)
- : _perm(perm), _layout(layout)
+ PermuteLayer(PermutationVector perm, DataLayout layout = DataLayout::UNKNOWN) : _perm(perm), _layout(layout)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_permute_node(s.graph(), common_params, input, _perm, _layout);
}
@@ -981,15 +986,14 @@ public:
*
* @param[in] pool_info Pooling information.
*/
- PoolingLayer(PoolingLayerInfo pool_info)
- : _pool_info(pool_info)
+ PoolingLayer(PoolingLayerInfo pool_info) : _pool_info(pool_info)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_pooling_node(s.graph(), common_params, input, _pool_info);
}
@@ -1013,9 +1017,9 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { _ss0.tail_node(), 0 };
- NodeIdxPair alpha = { _ss1.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {_ss0.tail_node(), 0};
+ NodeIdxPair alpha = {_ss1.tail_node(), 0};
return GraphBuilder::add_prelu_node(s.graph(), common_params, input, alpha);
}
@@ -1064,15 +1068,17 @@ public:
* @param[in] format_info (Optional) Format info.
* @param[in] transform (Optional) Input transform function.
*/
- PrintLayer(std::ostream &stream, const IOFormatInfo &format_info = IOFormatInfo(), const std::function<ITensor *(ITensor *)> transform = nullptr)
+ PrintLayer(std::ostream &stream,
+ const IOFormatInfo &format_info = IOFormatInfo(),
+ const std::function<ITensor *(ITensor *)> transform = nullptr)
: _stream(stream), _format_info(format_info), _transform(transform)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_print_node(s.graph(), common_params, input, _stream, _format_info, _transform);
}
@@ -1098,9 +1104,9 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input0 = { s.tail_node(), 0 };
- NodeIdxPair input1 = { _ss.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input0 = {s.tail_node(), 0};
+ NodeIdxPair input1 = {_ss.tail_node(), 0};
return GraphBuilder::add_priorbox_node(s.graph(), common_params, input0, input1, _prior_info);
}
@@ -1117,15 +1123,14 @@ public:
*
* @param[in] out_quant_info Output tensor quantization info
*/
- QuantizationLayer(QuantizationInfo out_quant_info)
- : _out_quant_info(out_quant_info)
+ QuantizationLayer(QuantizationInfo out_quant_info) : _out_quant_info(out_quant_info)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_quantization_node(s.graph(), common_params, input, _out_quant_info);
}
@@ -1150,8 +1155,8 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_reduction_operation_node(s.graph(), common_params, input, _op, _axis, _keep_dims);
}
@@ -1170,15 +1175,14 @@ public:
* @param[in] stride Stride value to use for reorganizing the values in the output tensor.
* It defines the spatial distance between 2 consecutive pixels in the x and y direction
*/
- ReorgLayer(int stride)
- : _stride(stride)
+ ReorgLayer(int stride) : _stride(stride)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_reorg_node(s.graph(), common_params, input, _stride);
}
@@ -1194,15 +1198,14 @@ public:
*
* @param[in] shape Target shape.
*/
- ReshapeLayer(TensorShape shape)
- : _shape(shape)
+ ReshapeLayer(TensorShape shape) : _shape(shape)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_reshape_node(s.graph(), common_params, input, _shape);
}
@@ -1221,8 +1224,8 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_resize_node(s.graph(), common_params, input, _policy, _width_scale, _height_scale);
}
@@ -1254,9 +1257,9 @@ public:
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { _ss_input.tail_node(), 0 };
- NodeIdxPair rois = { _ss_rois.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {_ss_input.tail_node(), 0};
+ NodeIdxPair rois = {_ss_rois.tail_node(), 0};
return GraphBuilder::add_roi_align_node(s.graph(), common_params, input, rois, _pool_info);
}
@@ -1275,16 +1278,15 @@ public:
* @param[in] mul_w Accessor to get mul weight from.
* @param[in] add_w Accessor to get add weight from.
*/
- ScaleLayer(ITensorAccessorUPtr mul_w,
- ITensorAccessorUPtr add_w)
+ ScaleLayer(ITensorAccessorUPtr mul_w, ITensorAccessorUPtr add_w)
: _mul_w(std::move(mul_w)), _add_w(std::move(add_w))
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_scale_layer(s.graph(), common_params, input, std::move(_mul_w), std::move(_add_w));
}
@@ -1302,15 +1304,14 @@ public:
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must equal rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must equal rank(input).
*/
- SliceLayer(Coordinates &starts, Coordinates &ends)
- : _starts(starts), _ends(ends)
+ SliceLayer(Coordinates &starts, Coordinates &ends) : _starts(starts), _ends(ends)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_slice_node(s.graph(), common_params, input, _starts, _ends);
}
@@ -1327,15 +1328,14 @@ public:
*
* @param[in] beta (Optional) Beta value. Default 1.0.
*/
- SoftmaxLayer(float beta = 1.0f)
- : _beta(beta)
+ SoftmaxLayer(float beta = 1.0f) : _beta(beta)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_softmax_node(s.graph(), common_params, input, _beta);
}
@@ -1354,17 +1354,14 @@ public:
* @param[in] rest_sub_streams Rest sub-graph branches
*/
template <typename... Ts>
- StackLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
- : _sub_streams(), _axis(0)
+ StackLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&...rest_sub_streams) : _sub_streams(), _axis(0)
{
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
- utility::for_each([&](SubStream && sub_stream)
- {
- _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
- },
- std::move(rest_sub_streams)...);
+ utility::for_each([&](SubStream &&sub_stream)
+ { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+ std::move(rest_sub_streams)...);
}
/** Construct a stack layer
*
@@ -1374,33 +1371,30 @@ public:
* @param[in] rest_sub_streams Rest sub-graph branches
*/
template <typename... Ts>
- StackLayer(int axis, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+ StackLayer(int axis, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&...rest_sub_streams)
: _sub_streams(), _axis(axis)
{
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
- utility::for_each([&](SubStream && sub_stream)
- {
- _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
- },
- std::move(rest_sub_streams)...);
+ utility::for_each([&](SubStream &&sub_stream)
+ { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+ std::move(rest_sub_streams)...);
}
/** Construct a stack layer
*
* @param[in] sub_stream Sub-stream
*/
template <typename... Ts>
- StackLayer(SubStream &&sub_stream)
- : _sub_streams(), _axis(0)
+ StackLayer(SubStream &&sub_stream) : _sub_streams(), _axis(0)
{
_sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
}
NodeID create_layer(IStream &s) override
{
NodeID nid = EmptyNodeID;
- NodeParams common_params = { name(), s.hints().target_hint };
- if(_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
+ NodeParams common_params = {name(), s.hints().target_hint};
+ if (_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
{
nid = _sub_streams[0]->tail_node();
}
@@ -1408,14 +1402,14 @@ public:
{
// Collect tail nodes and stack
std::vector<NodeIdxPair> nodes;
- for(auto &ss : _sub_streams)
+ for (auto &ss : _sub_streams)
{
- if(ss && (ss->tail_node() != EmptyNodeID))
+ if (ss && (ss->tail_node() != EmptyNodeID))
{
const auto tail_node = s.graph().node(ss->tail_node());
- if(tail_node != nullptr && tail_node->type() != NodeType::Output)
+ if (tail_node != nullptr && tail_node->type() != NodeType::Output)
{
- nodes.push_back({ ss->tail_node(), 0 });
+ nodes.push_back({ss->tail_node(), 0});
}
}
}
@@ -1440,15 +1434,18 @@ public:
* @param[in] strides The strides of the dimensions of the input tensor to be sliced. The length must equal rank(input).
* @param[in] strided_slice_info Contains masks for the starts, ends and strides
*/
- StridedSliceLayer(Coordinates &starts, Coordinates &ends, BiStrides &strides, StridedSliceLayerInfo strided_slice_info)
+ StridedSliceLayer(Coordinates &starts,
+ Coordinates &ends,
+ BiStrides &strides,
+ StridedSliceLayerInfo strided_slice_info)
: _starts(starts), _ends(ends), _strides(strides), _info(strided_slice_info)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_strided_slice_node(s.graph(), common_params, input, _starts, _ends, _strides, _info);
}
@@ -1467,15 +1464,14 @@ public:
*
* @param[in] act_info Activation info
*/
- YOLOLayer(ActivationLayerInfo act_info)
- : _act_info(act_info)
+ YOLOLayer(ActivationLayerInfo act_info) : _act_info(act_info)
{
}
NodeID create_layer(IStream &s) override
{
- NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeParams common_params = {name(), s.hints().target_hint};
+ NodeIdxPair input = {s.tail_node(), 0};
return GraphBuilder::add_yolo_node(s.graph(), common_params, input, _act_info);
}
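
Taken together, these layer classes are chained onto a Stream; each create_layer() call reads the stream's tail node and appends a new one, so the order of the << chain is the topology. A minimal end-to-end sketch, with nullptr accessors as placeholders (real code passes ITensorAccessor implementations to feed and drain the graph):

#include "arm_compute/graph.h"

using namespace arm_compute;
using namespace arm_compute::graph::frontend;

int main()
{
    Stream graph(0, "layers_sketch");

    const TensorDescriptor input_desc(TensorShape(224U, 224U, 3U, 1U), DataType::F32);

    // Each << appends a node after the current tail, exactly as the
    // create_layer() implementations above do.
    graph << Target::NEON
          << InputLayer(input_desc, nullptr)
          << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW))
          << SoftmaxLayer()
          << OutputLayer(nullptr);

    graph.finalize(Target::NEON, GraphConfig{});
    graph.run();
    return 0;
}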
diff --git a/arm_compute/graph/frontend/Stream.h b/arm_compute/graph/frontend/Stream.h
index db22f6d91b..7e760b6373 100644
--- a/arm_compute/graph/frontend/Stream.h
+++ b/arm_compute/graph/frontend/Stream.h
@@ -27,7 +27,6 @@
#include "arm_compute/graph/frontend/IStream.h"
#include "arm_compute/graph/frontend/IStreamOperators.h"
#include "arm_compute/graph/frontend/Types.h"
-
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/GraphManager.h"
@@ -65,7 +64,7 @@ public:
void run();
// Inherited overridden methods
- void add_layer(ILayer &layer) override;
+ void add_layer(ILayer &layer) override;
Graph &graph() override;
const Graph &graph() const override;
diff --git a/arm_compute/graph/frontend/SubStream.h b/arm_compute/graph/frontend/SubStream.h
index 2283cfeebe..c54317c52b 100644
--- a/arm_compute/graph/frontend/SubStream.h
+++ b/arm_compute/graph/frontend/SubStream.h
@@ -54,7 +54,7 @@ public:
SubStream(IStream &s);
// Inherited overridden methods
- void add_layer(ILayer &layer) override;
+ void add_layer(ILayer &layer) override;
Graph &graph() override;
const Graph &graph() const override;
diff --git a/arm_compute/graph/frontend/Types.h b/arm_compute/graph/frontend/Types.h
index bc4fe7ae0d..42b28b3cd2 100644
--- a/arm_compute/graph/frontend/Types.h
+++ b/arm_compute/graph/frontend/Types.h
@@ -33,39 +33,40 @@ namespace graph
namespace frontend
{
// Import types for graph
-using graph::DataType;
using graph::DataLayout;
using graph::DataLayoutDimension;
-using graph::TensorShape;
+using graph::DataType;
using graph::PermutationVector;
+using graph::TensorShape;
using graph::ActivationLayerInfo;
+using graph::ConvolutionMethod;
+using graph::DepthwiseConvolutionMethod;
+using graph::DimensionRoundingType;
using graph::EltwiseOperation;
+using graph::FastMathHint;
using graph::FullyConnectedLayerInfo;
+using graph::GraphConfig;
+using graph::InterpolationPolicy;
using graph::NormalizationLayerInfo;
using graph::NormType;
using graph::PadStrideInfo;
using graph::PoolingLayerInfo;
using graph::PoolingType;
+using graph::Size2D;
using graph::Target;
-using graph::ConvolutionMethod;
-using graph::FastMathHint;
-using graph::DepthwiseConvolutionMethod;
using graph::TensorDescriptor;
-using graph::DimensionRoundingType;
-using graph::GraphConfig;
-using graph::InterpolationPolicy;
-using graph::Size2D;
/** Hints that can be passed to the stream to expose parameterization */
struct StreamHints
{
- Target target_hint = { Target::UNSPECIFIED }; /**< Target execution hint */
- ConvolutionMethod convolution_method_hint = { ConvolutionMethod::Default }; /**< Convolution method hint */
- DepthwiseConvolutionMethod depthwise_convolution_method_hint = { DepthwiseConvolutionMethod::Default }; /**< Depthwise Convolution method hint */
- FastMathHint fast_math_hint = { FastMathHint::Disabled }; /**< Fast math hint */
+ Target target_hint = {Target::UNSPECIFIED}; /**< Target execution hint */
+ ConvolutionMethod convolution_method_hint = {ConvolutionMethod::Default}; /**< Convolution method hint */
+ DepthwiseConvolutionMethod depthwise_convolution_method_hint = {
+ DepthwiseConvolutionMethod::Default}; /**< Depthwise Convolution method hint */
+ FastMathHint fast_math_hint = {FastMathHint::Disabled}; /**< Fast math hint */
};
} // namespace frontend
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_STREAM_TYPES_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_STREAM_TYPES_H */
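
StreamHints is the state behind s.hints(): every layer above copies target_hint into its NodeParams, and the convolution layers additionally read the method and fast-math hints. A short sketch of overriding the defaults through the stream operators declared in IStreamOperators.h:

#include "arm_compute/graph.h"

using namespace arm_compute::graph::frontend;

// Sketch: hints set on the stream apply to every layer added afterwards.
void configure_hints(Stream &graph)
{
    graph << Target::CL                   // run on the OpenCL backend
          << ConvolutionMethod::Winograd  // prefer Winograd where valid
          << FastMathHint::Enabled;       // allow fast-math kernels
}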
diff --git a/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h b/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h
index cb1f079a2e..61d8854a61 100644
--- a/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h
+++ b/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h
@@ -40,7 +40,7 @@ public:
// Inherited methods overridden
virtual void mutate(Graph &g) override;
MutationType type() const override;
- const char *name() override;
+ const char *name() override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/mutators/GroupedConvolutionMutator.h b/arm_compute/graph/mutators/GroupedConvolutionMutator.h
index e68c7030d0..3ed8d786fc 100644
--- a/arm_compute/graph/mutators/GroupedConvolutionMutator.h
+++ b/arm_compute/graph/mutators/GroupedConvolutionMutator.h
@@ -40,7 +40,7 @@ public:
// Inherited methods overridden
virtual void mutate(Graph &g) override;
MutationType type() const override;
- const char *name() override;
+ const char *name() override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/mutators/InPlaceOperationMutator.h b/arm_compute/graph/mutators/InPlaceOperationMutator.h
index 6248d86a0a..86f62f1994 100644
--- a/arm_compute/graph/mutators/InPlaceOperationMutator.h
+++ b/arm_compute/graph/mutators/InPlaceOperationMutator.h
@@ -37,7 +37,7 @@ public:
// Inherited methods overridden
virtual void mutate(Graph &g) override;
MutationType type() const override;
- const char *name() override;
+ const char *name() override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/mutators/NodeExecutionMethodMutator.h b/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
index 07c8ffad97..505d4ab300 100644
--- a/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
+++ b/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
@@ -42,7 +42,7 @@ public:
// Inherited methods overridden
virtual void mutate(Graph &g) override;
MutationType type() const override;
- const char *name() override;
+ const char *name() override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/mutators/NodeFusionMutator.h b/arm_compute/graph/mutators/NodeFusionMutator.h
index f3e3eaa190..9d2d44f436 100644
--- a/arm_compute/graph/mutators/NodeFusionMutator.h
+++ b/arm_compute/graph/mutators/NodeFusionMutator.h
@@ -38,7 +38,7 @@ public:
// Inherited methods overridden
virtual void mutate(Graph &g) override;
MutationType type() const override;
- const char *name() override;
+ const char *name() override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h b/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h
index b14ef59532..ab9746a29b 100644
--- a/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h
+++ b/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h
@@ -40,7 +40,7 @@ public:
// Inherited methods overridden
virtual void mutate(Graph &g) override;
MutationType type() const override;
- const char *name() override;
+ const char  *name() override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/mutators/SyntheticDataTypeMutator.h b/arm_compute/graph/mutators/SyntheticDataTypeMutator.h
index 2292e52086..ce8af0a1d7 100644
--- a/arm_compute/graph/mutators/SyntheticDataTypeMutator.h
+++ b/arm_compute/graph/mutators/SyntheticDataTypeMutator.h
@@ -40,7 +40,7 @@ public:
// Inherited methods overridden
virtual void mutate(Graph &g) override;
MutationType type() const override;
- const char *name() override;
+ const char  *name() override;
private:
DataType _mutate_type;
diff --git a/arm_compute/graph/nodes/ActivationLayerNode.h b/arm_compute/graph/nodes/ActivationLayerNode.h
index 4a98ee248f..fe5f273db5 100644
--- a/arm_compute/graph/nodes/ActivationLayerNode.h
+++ b/arm_compute/graph/nodes/ActivationLayerNode.h
@@ -39,8 +39,7 @@ public:
* @param[in] info Activation Layer information
* @param[in] out_quant_info (Optional) Output quantization info
*/
- ActivationLayerNode(ActivationLayerInfo info,
- QuantizationInfo out_quant_info = QuantizationInfo());
+ ActivationLayerNode(ActivationLayerInfo info, QuantizationInfo out_quant_info = QuantizationInfo());
/** Activation metadata accessor
*
* @return The activation info of the layer
@@ -51,7 +50,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::ActivationLayer;
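
Nearly every node header below touches the same line: `void accept(INodeVisitor &v) override;`. That method is one half of classic double dispatch: the graph walks nodes through the virtual `accept`, and each concrete node calls back into the visitor overload for its own type. A minimal runnable sketch of the pattern under stand-in types follows; the real `INodeVisitor` declares one `visit` overload per node class in this patch.

    #include <iostream>

    class ActivationNode; // stand-ins for the real graph node classes
    class ConstantNode;

    // Visitor interface: one overload per concrete node type.
    class INodeVisitor
    {
    public:
        virtual ~INodeVisitor() = default;
        virtual void visit(ActivationNode &n) = 0;
        virtual void visit(ConstantNode &n)   = 0;
    };

    class INode
    {
    public:
        virtual ~INode() = default;
        virtual void accept(INodeVisitor &v) = 0; // same signature as in the hunks above
    };

    class ActivationNode final : public INode
    {
    public:
        void accept(INodeVisitor &v) override { v.visit(*this); } // dispatch on *this
    };

    class ConstantNode final : public INode
    {
    public:
        void accept(INodeVisitor &v) override { v.visit(*this); }
    };

    class PrintVisitor final : public INodeVisitor
    {
    public:
        void visit(ActivationNode &) override { std::cout << "activation\n"; }
        void visit(ConstantNode &) override { std::cout << "constant\n"; }
    };

    int main()
    {
        ActivationNode a;
        ConstantNode   c;
        PrintVisitor   p;
        INode         *nodes[] = {&a, &c};
        for (INode *n : nodes)
            n->accept(p); // virtual accept + overloaded visit = double dispatch
        return 0;
    }
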
diff --git a/arm_compute/graph/nodes/ArgMinMaxLayerNode.h b/arm_compute/graph/nodes/ArgMinMaxLayerNode.h
index 69191add99..65fbc36db6 100644
--- a/arm_compute/graph/nodes/ArgMinMaxLayerNode.h
+++ b/arm_compute/graph/nodes/ArgMinMaxLayerNode.h
@@ -65,7 +65,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::ArgMinMaxLayer;
diff --git a/arm_compute/graph/nodes/BatchNormalizationLayerNode.h b/arm_compute/graph/nodes/BatchNormalizationLayerNode.h
index e7f4049df8..8583ed87eb 100644
--- a/arm_compute/graph/nodes/BatchNormalizationLayerNode.h
+++ b/arm_compute/graph/nodes/BatchNormalizationLayerNode.h
@@ -60,7 +60,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::BatchNormalizationLayer;
diff --git a/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h b/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h
index 57175eba2e..96c2544065 100644
--- a/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h
+++ b/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h
@@ -50,7 +50,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
BoundingBoxTransformInfo _bbox_info;
diff --git a/arm_compute/graph/nodes/ChannelShuffleLayerNode.h b/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
index 0696fe56fc..d296a2dcc3 100644
--- a/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
+++ b/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
@@ -49,7 +49,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
unsigned int _num_groups;
diff --git a/arm_compute/graph/nodes/ConcatenateLayerNode.h b/arm_compute/graph/nodes/ConcatenateLayerNode.h
index 8582403355..13398b1a61 100644
--- a/arm_compute/graph/nodes/ConcatenateLayerNode.h
+++ b/arm_compute/graph/nodes/ConcatenateLayerNode.h
@@ -47,7 +47,8 @@ public:
*
* @return Expected output descriptor
*/
- static TensorDescriptor compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors, DataLayoutDimension axis);
+ static TensorDescriptor compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors,
+ DataLayoutDimension axis);
/** Enables or disables the depth concatenate node
*
* @warning This is used when concatenate is performed using sub-tensors, where this node is used as a placeholder.
@@ -78,7 +79,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
unsigned int _total_nodes;
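
`compute_output_descriptor` in the hunk above derives the concatenation output from its input descriptors and an axis. The underlying shape rule is simple: every dimension except the concatenation axis must match across inputs, and the sizes along that axis are summed. A stand-in sketch of that rule follows; the library's `TensorDescriptor` carries data type, layout, and quantization info in addition to the bare shape used here.

    #include <cassert>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    using TensorShape = std::vector<size_t>;

    // Shape rule used by concatenation: sum the sizes along `axis`,
    // require all other dimensions to match.
    TensorShape concat_output_shape(const std::vector<TensorShape> &inputs, size_t axis)
    {
        assert(!inputs.empty());
        TensorShape out = inputs.front();
        for (size_t i = 1; i < inputs.size(); ++i)
        {
            const TensorShape &s = inputs[i];
            assert(s.size() == out.size());
            for (size_t d = 0; d < out.size(); ++d)
            {
                if (d == axis)
                    out[d] += s[d];         // concatenation axis accumulates
                else
                    assert(s[d] == out[d]); // all other dims must agree
            }
        }
        return out;
    }

    int main()
    {
        // Two NHWC-style shapes concatenated on the channel dimension (axis 3).
        TensorShape out = concat_output_shape({{1, 8, 8, 16}, {1, 8, 8, 32}}, 3);
        std::cout << out[3] << '\n'; // prints 48
        return 0;
    }
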
diff --git a/arm_compute/graph/nodes/ConstNode.h b/arm_compute/graph/nodes/ConstNode.h
index b377c60208..400b9b4d9f 100644
--- a/arm_compute/graph/nodes/ConstNode.h
+++ b/arm_compute/graph/nodes/ConstNode.h
@@ -44,7 +44,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
TensorDescriptor _desc;
diff --git a/arm_compute/graph/nodes/ConvolutionLayerNode.h b/arm_compute/graph/nodes/ConvolutionLayerNode.h
index 99effa07dc..8a77b89f27 100644
--- a/arm_compute/graph/nodes/ConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/ConvolutionLayerNode.h
@@ -111,7 +111,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::ConvolutionLayer;
diff --git a/arm_compute/graph/nodes/DeconvolutionLayerNode.h b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
index e74adb17aa..553d05985c 100644
--- a/arm_compute/graph/nodes/DeconvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
@@ -61,7 +61,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
descriptors::DeconvolutionLayerDescriptor descriptor;
diff --git a/arm_compute/graph/nodes/DepthToSpaceLayerNode.h b/arm_compute/graph/nodes/DepthToSpaceLayerNode.h
index 25e30e2c67..5fbcc670ff 100644
--- a/arm_compute/graph/nodes/DepthToSpaceLayerNode.h
+++ b/arm_compute/graph/nodes/DepthToSpaceLayerNode.h
@@ -56,7 +56,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
int _block_shape;
diff --git a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
index 5df86983f0..441d68d2b8 100644
--- a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
@@ -101,7 +101,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::DepthwiseConvolutionLayer;
diff --git a/arm_compute/graph/nodes/DequantizationLayerNode.h b/arm_compute/graph/nodes/DequantizationLayerNode.h
index 4910938d47..1cce71373f 100644
--- a/arm_compute/graph/nodes/DequantizationLayerNode.h
+++ b/arm_compute/graph/nodes/DequantizationLayerNode.h
@@ -46,8 +46,8 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DEQUANTIZATION_NODE_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_DEQUANTIZATION_NODE_H */
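
For context on the node above: dequantization maps quantized integers back to real values with the affine rule real = scale * (quantized - offset), the (scale, offset) pair being what `QuantizationInfo` carries elsewhere in these headers. A one-function sketch with illustrative values follows; the function and its inputs are stand-ins, not library code.

    #include <cstdint>
    #include <iostream>

    // Affine dequantization rule: real = scale * (quantized - zero_point).
    float dequantize(uint8_t q, float scale, int32_t zero_point)
    {
        return scale * (static_cast<int32_t>(q) - zero_point);
    }

    int main()
    {
        std::cout << dequantize(/*q=*/200, /*scale=*/0.05f, /*zero_point=*/128) << '\n'; // 3.6
        return 0;
    }
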
diff --git a/arm_compute/graph/nodes/DetectionOutputLayerNode.h b/arm_compute/graph/nodes/DetectionOutputLayerNode.h
index b4b910c40e..c3e067e430 100644
--- a/arm_compute/graph/nodes/DetectionOutputLayerNode.h
+++ b/arm_compute/graph/nodes/DetectionOutputLayerNode.h
@@ -51,13 +51,14 @@ public:
*
* @return Output descriptor
*/
- static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor, const DetectionOutputLayerInfo &info);
+ static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const DetectionOutputLayerInfo &info);
// Inherited overridden methods:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
DetectionOutputLayerInfo _info;
diff --git a/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h b/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
index 6ff78aee07..a53aaf2b9c 100644
--- a/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
+++ b/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
@@ -49,7 +49,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
DetectionPostProcessLayerInfo _info;
@@ -59,4 +59,4 @@ private:
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DETECTION_POST_PROCESS_LAYER_NODE_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_DETECTION_POST_PROCESS_LAYER_NODE_H */
diff --git a/arm_compute/graph/nodes/DummyNode.h b/arm_compute/graph/nodes/DummyNode.h
index 645f1b325d..2263525a72 100644
--- a/arm_compute/graph/nodes/DummyNode.h
+++ b/arm_compute/graph/nodes/DummyNode.h
@@ -51,11 +51,11 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
TensorShape _shape;
};
} // namespace graph
} // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DUMMY_NODE_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_DUMMY_NODE_H */
diff --git a/arm_compute/graph/nodes/EltwiseLayerNode.h b/arm_compute/graph/nodes/EltwiseLayerNode.h
index 7a6d8e8303..258298259f 100644
--- a/arm_compute/graph/nodes/EltwiseLayerNode.h
+++ b/arm_compute/graph/nodes/EltwiseLayerNode.h
@@ -79,7 +79,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
static constexpr NodeType node_type = NodeType::EltwiseLayer;
@@ -112,7 +112,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
static constexpr NodeType node_type = NodeType::UnaryEltwiseLayer;
diff --git a/arm_compute/graph/nodes/FlattenLayerNode.h b/arm_compute/graph/nodes/FlattenLayerNode.h
index 046114c291..af104707a1 100644
--- a/arm_compute/graph/nodes/FlattenLayerNode.h
+++ b/arm_compute/graph/nodes/FlattenLayerNode.h
@@ -41,7 +41,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/nodes/FullyConnectedLayerNode.h b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
index 9ade62bf4a..3bcf386d64 100644
--- a/arm_compute/graph/nodes/FullyConnectedLayerNode.h
+++ b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
@@ -73,7 +73,7 @@ public:
*/
static TensorDescriptor compute_weights_descriptor(const TensorDescriptor &input_descriptor,
unsigned int num_outputs,
- FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(),
+ FullyConnectedLayerInfo fc_info            = FullyConnectedLayerInfo(),
const QuantizationInfo &weights_quant_info = QuantizationInfo());
/** Computes fully connected layer output descriptor
*
@@ -98,7 +98,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
static constexpr NodeType node_type = NodeType::FullyConnectedLayer;
diff --git a/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h b/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h
index b0051b1385..d891ea49eb 100644
--- a/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h
+++ b/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h
@@ -43,7 +43,8 @@ public:
* @param[in] fast_math_hint (Optional) Fast math hint
* @param[in] fused_activation (Optional) Fused activation layer. Disabled if not specified
*/
- FusedConvolutionBatchNormalizationNode(float epsilon, PadStrideInfo info,
+ FusedConvolutionBatchNormalizationNode(float epsilon,
+ PadStrideInfo info,
unsigned int num_groups = 1,
ConvolutionMethod method = ConvolutionMethod::Default,
FastMathHint fast_math_hint = FastMathHint::Disabled,
@@ -122,7 +123,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::FusedConvolutionBatchNormalizationLayer;
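
The `epsilon` parameter in the constructor above is the batch-normalization epsilon, which survives fusion because the BN statistics fold into the convolution's weights and bias. Per output channel, y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta collapses to a plain convolution with scaled weights and a shifted bias. A scalar sketch of the folding follows; the node itself only stores metadata, and the actual folding in the backends operates on whole tensors.

    #include <cmath>
    #include <iostream>

    // Folds one channel's batch-norm parameters into the conv weight/bias:
    //   y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta
    // becomes conv'(x) with w' = w * s and b' = (b - mean) * s + beta,
    // where s = gamma / sqrt(var + eps).
    void fold_bn_into_conv(float &w, float &b,
                           float mean, float var, float gamma, float beta, float epsilon)
    {
        const float s = gamma / std::sqrt(var + epsilon);
        w = w * s;
        b = (b - mean) * s + beta;
    }

    int main()
    {
        float w = 0.5f, b = 0.1f;
        fold_bn_into_conv(w, b, /*mean=*/0.2f, /*var=*/1.0f,
                          /*gamma=*/2.0f, /*beta=*/0.3f, /*epsilon=*/1e-5f);
        std::cout << w << ' ' << b << '\n'; // fused weight and bias
        return 0;
    }
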
diff --git a/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h b/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h
index a01cb9dc42..a61b155151 100644
--- a/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h
+++ b/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h
@@ -46,7 +46,7 @@ public:
PadStrideInfo info,
unsigned int depth_multiplier,
DepthwiseConvolutionMethod method,
- ActivationLayerInfo fused_activation = ActivationLayerInfo());
+ ActivationLayerInfo fused_activation = ActivationLayerInfo());
/** Sets the depthwise convolution layer method to use
*
@@ -117,7 +117,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::FusedDepthwiseConvolutionBatchNormalizationLayer;
diff --git a/arm_compute/graph/nodes/GenerateProposalsLayerNode.h b/arm_compute/graph/nodes/GenerateProposalsLayerNode.h
index 6f8edc8758..b5e4b9781c 100644
--- a/arm_compute/graph/nodes/GenerateProposalsLayerNode.h
+++ b/arm_compute/graph/nodes/GenerateProposalsLayerNode.h
@@ -50,7 +50,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
GenerateProposalsInfo _info;
diff --git a/arm_compute/graph/nodes/InputNode.h b/arm_compute/graph/nodes/InputNode.h
index 07091af64f..0983d25a59 100644
--- a/arm_compute/graph/nodes/InputNode.h
+++ b/arm_compute/graph/nodes/InputNode.h
@@ -44,7 +44,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
TensorDescriptor _desc;
diff --git a/arm_compute/graph/nodes/L2NormalizeLayerNode.h b/arm_compute/graph/nodes/L2NormalizeLayerNode.h
index 8edc5b0bf3..ed11412b70 100644
--- a/arm_compute/graph/nodes/L2NormalizeLayerNode.h
+++ b/arm_compute/graph/nodes/L2NormalizeLayerNode.h
@@ -68,7 +68,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
int _axis;
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index ae9f177ec4..d4ad32b6f0 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -50,18 +50,18 @@
#include "arm_compute/graph/nodes/NormalizationLayerNode.h"
#include "arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h"
#include "arm_compute/graph/nodes/OutputNode.h"
-#include "arm_compute/graph/nodes/PReluLayerNode.h"
#include "arm_compute/graph/nodes/PadLayerNode.h"
#include "arm_compute/graph/nodes/PermuteLayerNode.h"
#include "arm_compute/graph/nodes/PoolingLayerNode.h"
+#include "arm_compute/graph/nodes/PReluLayerNode.h"
#include "arm_compute/graph/nodes/PrintLayerNode.h"
#include "arm_compute/graph/nodes/PriorBoxLayerNode.h"
#include "arm_compute/graph/nodes/QuantizationLayerNode.h"
-#include "arm_compute/graph/nodes/ROIAlignLayerNode.h"
#include "arm_compute/graph/nodes/ReductionLayerNode.h"
#include "arm_compute/graph/nodes/ReorgLayerNode.h"
#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
#include "arm_compute/graph/nodes/ResizeLayerNode.h"
+#include "arm_compute/graph/nodes/ROIAlignLayerNode.h"
#include "arm_compute/graph/nodes/SliceLayerNode.h"
#include "arm_compute/graph/nodes/SoftmaxLayerNode.h"
#include "arm_compute/graph/nodes/SplitLayerNode.h"
diff --git a/arm_compute/graph/nodes/NormalizationLayerNode.h b/arm_compute/graph/nodes/NormalizationLayerNode.h
index 503b859e53..86f2fb9dba 100644
--- a/arm_compute/graph/nodes/NormalizationLayerNode.h
+++ b/arm_compute/graph/nodes/NormalizationLayerNode.h
@@ -49,7 +49,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
NormalizationLayerInfo _info;
diff --git a/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h b/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h
index 4d84c20de0..158acc4c23 100644
--- a/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h
+++ b/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h
@@ -41,7 +41,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/nodes/OutputNode.h b/arm_compute/graph/nodes/OutputNode.h
index c91bc6b699..75484ab328 100644
--- a/arm_compute/graph/nodes/OutputNode.h
+++ b/arm_compute/graph/nodes/OutputNode.h
@@ -41,7 +41,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/nodes/PReluLayerNode.h b/arm_compute/graph/nodes/PReluLayerNode.h
index b8e6c1ae7f..532fdccb3a 100644
--- a/arm_compute/graph/nodes/PReluLayerNode.h
+++ b/arm_compute/graph/nodes/PReluLayerNode.h
@@ -41,7 +41,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/nodes/PadLayerNode.h b/arm_compute/graph/nodes/PadLayerNode.h
index d6ff3553da..dcb5ea595b 100644
--- a/arm_compute/graph/nodes/PadLayerNode.h
+++ b/arm_compute/graph/nodes/PadLayerNode.h
@@ -56,7 +56,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::PadLayer;
diff --git a/arm_compute/graph/nodes/PermuteLayerNode.h b/arm_compute/graph/nodes/PermuteLayerNode.h
index 0b2380b51c..62654e777c 100644
--- a/arm_compute/graph/nodes/PermuteLayerNode.h
+++ b/arm_compute/graph/nodes/PermuteLayerNode.h
@@ -51,7 +51,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
PermutationVector _perm;
diff --git a/arm_compute/graph/nodes/PoolingLayerNode.h b/arm_compute/graph/nodes/PoolingLayerNode.h
index b336bb906f..c81f3f98dc 100644
--- a/arm_compute/graph/nodes/PoolingLayerNode.h
+++ b/arm_compute/graph/nodes/PoolingLayerNode.h
@@ -57,7 +57,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
PoolingLayerInfo _info;
diff --git a/arm_compute/graph/nodes/PrintLayerNode.h b/arm_compute/graph/nodes/PrintLayerNode.h
index b57ac1f6d4..e7accc8015 100644
--- a/arm_compute/graph/nodes/PrintLayerNode.h
+++ b/arm_compute/graph/nodes/PrintLayerNode.h
@@ -43,7 +43,9 @@ public:
* @param[in] format_info (Optional) Format info.
* @param[in] transform (Optional) Input transform function.
*/
- PrintLayerNode(std::ostream &stream, const IOFormatInfo &format_info = IOFormatInfo(), const std::function<ITensor *(ITensor *)> transform = nullptr);
+ PrintLayerNode(std::ostream &stream,
+ const IOFormatInfo &format_info = IOFormatInfo(),
+ const std::function<ITensor *(ITensor *)> transform = nullptr);
/** Stream metadata accessor
*
@@ -67,7 +69,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
std::ostream &_stream;
diff --git a/arm_compute/graph/nodes/PriorBoxLayerNode.h b/arm_compute/graph/nodes/PriorBoxLayerNode.h
index c7eadd1fe5..db36bfb1e0 100644
--- a/arm_compute/graph/nodes/PriorBoxLayerNode.h
+++ b/arm_compute/graph/nodes/PriorBoxLayerNode.h
@@ -51,13 +51,14 @@ public:
*
* @return Output descriptor
*/
- static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor, const PriorBoxLayerInfo &info);
+ static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const PriorBoxLayerInfo &info);
// Inherited overridden methods:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
PriorBoxLayerInfo _info;
diff --git a/arm_compute/graph/nodes/QuantizationLayerNode.h b/arm_compute/graph/nodes/QuantizationLayerNode.h
index e5d81afa0e..b8e4c7d27b 100644
--- a/arm_compute/graph/nodes/QuantizationLayerNode.h
+++ b/arm_compute/graph/nodes/QuantizationLayerNode.h
@@ -51,7 +51,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
static constexpr NodeType node_type = NodeType::QuantizationLayer;
diff --git a/arm_compute/graph/nodes/ROIAlignLayerNode.h b/arm_compute/graph/nodes/ROIAlignLayerNode.h
index 5abd0659b5..70309a551c 100644
--- a/arm_compute/graph/nodes/ROIAlignLayerNode.h
+++ b/arm_compute/graph/nodes/ROIAlignLayerNode.h
@@ -56,7 +56,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
ROIPoolingLayerInfo _pool_info;
diff --git a/arm_compute/graph/nodes/ReductionLayerNode.h b/arm_compute/graph/nodes/ReductionLayerNode.h
index b8d295945c..ff99466c8f 100644
--- a/arm_compute/graph/nodes/ReductionLayerNode.h
+++ b/arm_compute/graph/nodes/ReductionLayerNode.h
@@ -56,7 +56,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
ReductionOperation _op;
diff --git a/arm_compute/graph/nodes/ReorgLayerNode.h b/arm_compute/graph/nodes/ReorgLayerNode.h
index 986692ed28..a3bbcdb00f 100644
--- a/arm_compute/graph/nodes/ReorgLayerNode.h
+++ b/arm_compute/graph/nodes/ReorgLayerNode.h
@@ -57,7 +57,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
int _stride;
diff --git a/arm_compute/graph/nodes/ReshapeLayerNode.h b/arm_compute/graph/nodes/ReshapeLayerNode.h
index 727d253ce5..992275c2b1 100644
--- a/arm_compute/graph/nodes/ReshapeLayerNode.h
+++ b/arm_compute/graph/nodes/ReshapeLayerNode.h
@@ -44,7 +44,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
TensorShape _shape;
diff --git a/arm_compute/graph/nodes/ResizeLayerNode.h b/arm_compute/graph/nodes/ResizeLayerNode.h
index 79f8889f9c..480d6e517f 100644
--- a/arm_compute/graph/nodes/ResizeLayerNode.h
+++ b/arm_compute/graph/nodes/ResizeLayerNode.h
@@ -51,7 +51,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
InterpolationPolicy _policy;
diff --git a/arm_compute/graph/nodes/SliceLayerNode.h b/arm_compute/graph/nodes/SliceLayerNode.h
index 08d3794e26..63f266b217 100644
--- a/arm_compute/graph/nodes/SliceLayerNode.h
+++ b/arm_compute/graph/nodes/SliceLayerNode.h
@@ -51,7 +51,8 @@ public:
* @return Output descriptor
*/
static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
- const Coordinates &starts, const Coordinates &ends);
+ const Coordinates &starts,
+ const Coordinates &ends);
/** Start coordinates accessor
*
* @return Start coordinates of the dimensions
@@ -67,7 +68,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
Coordinates _starts;
diff --git a/arm_compute/graph/nodes/SoftmaxLayerNode.h b/arm_compute/graph/nodes/SoftmaxLayerNode.h
index 0868c6ff16..2cb1ac2cf4 100644
--- a/arm_compute/graph/nodes/SoftmaxLayerNode.h
+++ b/arm_compute/graph/nodes/SoftmaxLayerNode.h
@@ -49,7 +49,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
public:
static constexpr NodeType node_type = NodeType::SoftmaxLayer;
diff --git a/arm_compute/graph/nodes/SplitLayerNode.h b/arm_compute/graph/nodes/SplitLayerNode.h
index 13cccdd447..5e6df53c0f 100644
--- a/arm_compute/graph/nodes/SplitLayerNode.h
+++ b/arm_compute/graph/nodes/SplitLayerNode.h
@@ -55,7 +55,9 @@ public:
* @return A pair with the descriptor of the split and the starting coordinates
*/
std::pair<TensorDescriptor, Coordinates> compute_output_descriptor(const TensorDescriptor &input_descriptor,
- unsigned int num_splits, int axis, unsigned int idx);
+ unsigned int num_splits,
+ int axis,
+ unsigned int idx);
/** Number of splits accessor
*
* @return Number of splits
@@ -72,7 +74,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
unsigned int _num_splits;
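
`compute_output_descriptor` above returns both the shape of split `idx` and its starting coordinates. For an even split, the rule is: the size along `axis` divides by `num_splits`, and the start offset on that axis is `idx` times the per-split size. A stand-in sketch under that even-split assumption follows; the real implementation must also handle uneven remainders and negative axis values, which this omits.

    #include <cassert>
    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    using TensorShape = std::vector<size_t>;
    using Coordinates = std::vector<size_t>;

    // Even-split rule inferred from the interface above: the shape shrinks on
    // `axis`, and the start coordinates are zero everywhere except `axis`.
    std::pair<TensorShape, Coordinates>
    split_output(const TensorShape &input, size_t num_splits, size_t axis, size_t idx)
    {
        assert(axis < input.size() && idx < num_splits);
        assert(input[axis] % num_splits == 0); // even split only in this sketch
        TensorShape shape = input;
        shape[axis] /= num_splits;
        Coordinates start(input.size(), 0);
        start[axis] = idx * shape[axis];
        return {shape, start};
    }

    int main()
    {
        auto [shape, start] = split_output({1, 8, 8, 32}, /*num_splits=*/4, /*axis=*/3, /*idx=*/2);
        std::cout << shape[3] << ' ' << start[3] << '\n'; // prints "8 16"
        return 0;
    }
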
diff --git a/arm_compute/graph/nodes/StackLayerNode.h b/arm_compute/graph/nodes/StackLayerNode.h
index 2990895c2b..9f0767c9f2 100644
--- a/arm_compute/graph/nodes/StackLayerNode.h
+++ b/arm_compute/graph/nodes/StackLayerNode.h
@@ -58,7 +58,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
unsigned int _total_nodes;
diff --git a/arm_compute/graph/nodes/StridedSliceLayerNode.h b/arm_compute/graph/nodes/StridedSliceLayerNode.h
index 6039f312b3..f521feb780 100644
--- a/arm_compute/graph/nodes/StridedSliceLayerNode.h
+++ b/arm_compute/graph/nodes/StridedSliceLayerNode.h
@@ -84,7 +84,7 @@ public:
NodeType type() const override;
bool forward_descriptors() override;
TensorDescriptor configure_output(size_t idx) const override;
- void accept(INodeVisitor &v) override;
+ void             accept(INodeVisitor &v) override;
private:
Coordinates _starts;
diff --git a/arm_compute/graph/printers/DotGraphPrinter.h b/arm_compute/graph/printers/DotGraphPrinter.h
index 564aecfb1e..6638033044 100644
--- a/arm_compute/graph/printers/DotGraphPrinter.h
+++ b/arm_compute/graph/printers/DotGraphPrinter.h
@@ -25,7 +25,6 @@
#define ACL_ARM_COMPUTE_GRAPH_PRINTERS_DOTGRAPHPRINTER_H
#include "arm_compute/graph/IGraphPrinter.h"
-
#include "arm_compute/graph/INodeVisitor.h"
#include <string>