aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/graph
diff options
context:
space:
mode:
authorIsabella Gottardi <isabella.gottardi@arm.com>2019-03-14 10:32:11 +0000
committerIsabella Gottardi <isabella.gottardi@arm.com>2019-03-20 11:23:43 +0000
commit0ae5de9124a0094e656244ad2f807c084966fc04 (patch)
treeab698ad9c43f95dda13f78cf76b753105cf69388 /arm_compute/graph
parentb0c5037d94ba7073ccabb0ebaff54db320f184c4 (diff)
downloadComputeLibrary-0ae5de9124a0094e656244ad2f807c084966fc04.tar.gz
COMPMID-1995: Prepare Graph to support different input/output quantization info
- Added support for different input/output qinfo in ActivationLayer and DepthwiseConv - Added support for different input/output qinfo in ConcatenateLayer introducing ConcatDescriptor - Added reshape validate - Allow OutputLayer to return a specific connection index from the input - Not run Inplace and Depth mutator when input/output quantization info are different Change-Id: I03f5e416fc43ddd284e1501887202a3145f76d8a Signed-off-by: Isabella Gottardi <isabella.gottardi@arm.com> Reviewed-on: https://review.mlplatform.org/c/852 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/graph')
-rw-r--r--arm_compute/graph/GraphBuilder.h27
-rw-r--r--arm_compute/graph/INode.h3
-rw-r--r--arm_compute/graph/LayerDescriptors.h69
-rw-r--r--arm_compute/graph/backends/FunctionHelpers.h25
-rw-r--r--arm_compute/graph/backends/ValidateHelpers.h24
-rw-r--r--arm_compute/graph/frontend/Layers.h72
-rw-r--r--arm_compute/graph/nodes/ActivationLayerNode.h7
-rw-r--r--arm_compute/graph/nodes/ConcatenateLayerNode.h20
-rw-r--r--arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h7
9 files changed, 193 insertions, 61 deletions
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index a2a938b1cc..590e4d9b44 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -25,6 +25,7 @@
#define __ARM_COMPUTE_GRAPH_GRAPH_BUILDER_H__
#include "arm_compute/graph/ITensorAccessor.h"
+#include "arm_compute/graph/LayerDescriptors.h"
#include "arm_compute/graph/Types.h"
namespace arm_compute
@@ -73,14 +74,16 @@ public:
static NodeID add_output_node(Graph &g, NodeParams params, NodeIdxPair input, ITensorAccessorUPtr accessor = nullptr);
/** Adds an activation layer node to the graph
*
- * @param[in] g Graph to add the node to
- * @param[in] params Common node parameters
- * @param[in] input Input to the activation layer node as a NodeID-Index pair
- * @param[in] act_info Activation layer information
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input Input to the activation layer node as a NodeID-Index pair
+ * @param[in] act_info Activation layer information
+ * @param[in] out_quant_info (Optional) Output quantization info
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_activation_node(Graph &g, NodeParams params, NodeIdxPair input, ActivationLayerInfo act_info);
+ static NodeID add_activation_node(Graph &g, NodeParams params, NodeIdxPair input, ActivationLayerInfo act_info,
+ const QuantizationInfo out_quant_info = QuantizationInfo());
/** Adds a batch normalization layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -163,14 +166,14 @@ public:
ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr);
/** Adds a depth concatenate node to the graph
*
- * @param[in] g Graph to add the node to
- * @param[in] params Common node parameters
- * @param[in] inputs Inputs to the depth concatenate layer node as a NodeID-Index pair
- * @param[in] axis Concatenation axis
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] inputs Inputs to the depth concatenate layer node as a NodeID-Index pair
+ * @param[in] concat_descriptor Concatenation layer descriptor
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
- static NodeID add_concatenate_node(Graph &g, NodeParams params, std::vector<NodeIdxPair> inputs, DataLayoutDimension axis);
+ static NodeID add_concatenate_node(Graph &g, NodeParams params, std::vector<NodeIdxPair> inputs, descriptors::ConcatLayerDescriptor concat_descriptor);
/** Adds a depth-wise convolution layer node to the graph
*
* @param[in] g Graph to add the node to
@@ -183,13 +186,15 @@ public:
* @param[in] weights_accessor (Optional) Accessor of the weights node data
* @param[in] bias_accessor (Optional) Accessor of the bias node data
* @param[in] quant_info (Optional) Weights quantization info
+ * @param[in] out_quant_info (Optional) Output quantization info
*
* @return Node ID of the created node, EmptyNodeID in case of error
*/
static NodeID add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
Size2D kernel_spatial_extend, PadStrideInfo conv_info, int depth_multiplier = 1,
DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::Default,
- ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr, const QuantizationInfo quant_info = QuantizationInfo());
+ ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr, const QuantizationInfo quant_info = QuantizationInfo(),
+ const QuantizationInfo out_quant_info = QuantizationInfo());
/** Adds an element-wise layer node to the graph
*
* @param[in] g Graph to add the node to
diff --git a/arm_compute/graph/INode.h b/arm_compute/graph/INode.h
index 4219150f58..edff8379d3 100644
--- a/arm_compute/graph/INode.h
+++ b/arm_compute/graph/INode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define __ARM_COMPUTE_GRAPH_INODE_H__
#include "arm_compute/core/Error.h"
+#include "arm_compute/graph/LayerDescriptors.h"
#include "arm_compute/graph/TensorDescriptor.h"
#include "arm_compute/graph/Types.h"
diff --git a/arm_compute/graph/LayerDescriptors.h b/arm_compute/graph/LayerDescriptors.h
new file mode 100644
index 0000000000..79099326ec
--- /dev/null
+++ b/arm_compute/graph/LayerDescriptors.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CONCAT_DESCRIPTOR_H__
+#define __ARM_COMPUTE_CONCAT_DESCRIPTOR_H__
+
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+namespace descriptors
+{
+/** Concatenate layer descriptor */
+struct ConcatLayerDescriptor
+{
+ /** Default constructor */
+ ConcatLayerDescriptor()
+ : axis(DataLayoutDimension::CHANNEL), output_qinfo()
+ {
+ }
+
+    /** Constructor for the concatenate layer descriptor
+ *
+ * @param[in] axis Axis.
+ */
+ ConcatLayerDescriptor(DataLayoutDimension axis)
+ : axis(axis), output_qinfo()
+ {
+ }
+
+    /** Constructor for the concatenate layer descriptor
+ *
+ * @param[in] axis Axis.
+ * @param[in] output_qinfo Output quantization info.
+ */
+ ConcatLayerDescriptor(DataLayoutDimension axis, QuantizationInfo output_qinfo)
+ : axis(axis), output_qinfo(output_qinfo)
+ {
+ }
+
+ const DataLayoutDimension axis; /**< Concatenation Axis */
+    const QuantizationInfo    output_qinfo; /**< Output quantization info */
+};
+} // namespace descriptors
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CONCAT_DESCRIPTOR_H__ */ \ No newline at end of file
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index d0035d9a84..4a423d2490 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -109,7 +109,7 @@ std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
<< node.name()
<< " Type: " << node.type()
- << " Target " << TargetInfo::TargetType
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Shape: " << input->info()->tensor_shape()
<< " Activation function: " << act_info.activation()
@@ -245,8 +245,10 @@ std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransf
func->configure(input, output, deltas, bbox_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Shape: " << input->info()->tensor_shape()
<< " BoundingBox Info img W: " << bbox_info.img_width() << " "
@@ -326,6 +328,12 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
func->configure(inputs, output, concat_axis);
// Log info
+ const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
+ std::ostringstream qss;
+ if(is_quantized)
+ {
+ qss << " Output QuantInfo: " << output->info()->quantization_info();
+ }
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
<< node.name()
<< " Type: " << node.type()
@@ -334,6 +342,7 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
<< " Shape: " << output->info()->tensor_shape()
<< " Num Inputs: " << inputs.size()
<< " Axis: " << concat_axis
+ << qss.str()
<< std::endl);
return std::move(func);
@@ -421,10 +430,10 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
<< " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Groups: " << num_groups
- << qss.str()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
+ << qss.str()
<< (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
<< std::endl);
return func;
@@ -536,11 +545,11 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
<< " Type: " << func_name
<< " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
- << qss.str()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
<< " Depth multiplier: " << depth_multiplier
+ << qss.str()
<< (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
<< std::endl);
return func;
@@ -1177,8 +1186,10 @@ std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
func->configure(input, rois, output, pool_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 8942be2da8..dbf8f35121 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -203,6 +203,7 @@ Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
return status;
}
+
/** Validates a detection output layer node
*
* @tparam DetectionOutputLayer DetectionOutput layer type
@@ -372,6 +373,29 @@ Status validate_reorg_layer(ReorgLayerNode &node)
return ReorgLayer::validate(input, output, node.stride());
}
+/** Validates a Reshape layer node
+ *
+ * @tparam ReshapeLayer Reshape layer type
+ *
+ * @param[in] node Node to validate
+ *
+ * @return Status
+ */
+template <typename ReshapeLayer>
+Status validate_reshape_layer(ReshapeLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract input and output
+ arm_compute::ITensorInfo *input = detail::get_backing_tensor_info(node.input(0));
+ arm_compute::ITensorInfo *output = detail::get_backing_tensor_info(node.output(0));
+
+ // Validate function
+ return ReshapeLayer::validate(input, output);
+}
+
/** Validates a ROI Align layer node
*
* @tparam ROIAlignLayer ROIAlign layer type
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index d062d5a53e..67dc06c878 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -72,22 +72,24 @@ class OutputLayer final : public ILayer
public:
/** Construct an output layer.
*
- * @param[in] accessor Accessor to give output tensor data to.
+ * @param[in] accessor Accessor to give output tensor data to.
+ * @param[in] connection_idx (Optional) Input connection index
*/
- OutputLayer(ITensorAccessorUPtr accessor)
- : _accessor(std::move(accessor))
+ OutputLayer(ITensorAccessorUPtr accessor, unsigned int connection_idx = 0)
+ : _accessor(std::move(accessor)), _connection_idx(connection_idx)
{
}
NodeID create_layer(IStream &s) override
{
NodeParams common_params = { name(), s.hints().target_hint };
- NodeIdxPair input = { s.tail_node(), 0 };
+ NodeIdxPair input = { s.tail_node(), _connection_idx };
return GraphBuilder::add_output_node(s.graph(), common_params, input, std::move(_accessor));
}
private:
ITensorAccessorUPtr _accessor;
+ unsigned int _connection_idx;
};
/** Activation Layer */
@@ -96,10 +98,13 @@ class ActivationLayer final : public ILayer
public:
/** Construct an activation layer.
*
- * @param[in] act_info Activation information
+ * @param[in] act_info Activation information
+ * @param[in] out_quant_info (Optional) Output quantization info
*/
- ActivationLayer(ActivationLayerInfo act_info)
- : _act_info(act_info)
+ ActivationLayer(ActivationLayerInfo act_info,
+ const QuantizationInfo out_quant_info = QuantizationInfo())
+ : _act_info(act_info),
+ _out_quant_info(std::move(out_quant_info))
{
}
@@ -107,11 +112,12 @@ public:
{
NodeParams common_params = { name(), s.hints().target_hint };
NodeIdxPair input = { s.tail_node(), 0 };
- return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info);
+ return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info, std::move(_out_quant_info));
}
private:
- ActivationLayerInfo _act_info;
+ ActivationLayerInfo _act_info;
+ const QuantizationInfo _out_quant_info;
};
/** Batchnormalization Layer */
@@ -225,7 +231,7 @@ public:
*/
template <typename... Ts>
ConcatLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
- : _sub_streams(), _axis(DataLayoutDimension::CHANNEL)
+ : _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
{
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
@@ -238,14 +244,14 @@ public:
}
/** Construct a concatenation layer
*
- * @param[in] axis Axis over the concatenation will be performed
- * @param[in] sub_stream1 First graph branch
- * @param[in] sub_stream2 Second graph branch
- * @param[in] rest_sub_streams Rest sub-graph branches
+ * @param[in] concat_descriptor Concat layer descriptor
+ * @param[in] sub_stream1 First graph branch
+ * @param[in] sub_stream2 Second graph branch
+ * @param[in] rest_sub_streams Rest sub-graph branches
*/
template <typename... Ts>
- ConcatLayer(DataLayoutDimension axis, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
- : _sub_streams(), _axis(axis)
+ ConcatLayer(descriptors::ConcatLayerDescriptor concat_descriptor, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+ : _sub_streams(), _concat_descriptor(concat_descriptor)
{
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
@@ -262,7 +268,7 @@ public:
*/
template <typename... Ts>
ConcatLayer(SubStream &&sub_stream)
- : _sub_streams(), _axis(DataLayoutDimension::CHANNEL)
+ : _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
{
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
}
@@ -289,14 +295,14 @@ public:
}
}
}
- nid = GraphBuilder::add_concatenate_node(s.graph(), common_params, nodes, _axis);
+ nid = GraphBuilder::add_concatenate_node(s.graph(), common_params, nodes, _concat_descriptor);
}
return nid;
}
private:
std::vector<std::unique_ptr<SubStream>> _sub_streams;
- DataLayoutDimension _axis;
+ descriptors::ConcatLayerDescriptor _concat_descriptor;
};
/** Convolution Layer */
@@ -414,28 +420,31 @@ class DepthwiseConvolutionLayer final : public ILayer
public:
/** Construct a depthwise convolution layer.
*
- * @param[in] conv_width Convolution width.
- * @param[in] conv_height Convolution height.
- * @param[in] weights Accessor to get kernel weights from.
- * @param[in] bias Accessor to get kernel bias from.
- * @param[in] conv_info Padding and stride information.
- * @param[in] depth_multiplier (Optional) Depth multiplier parameter.
- * @param[in] quant_info (Optional) Quantization info used for weights
+ * @param[in] conv_width Convolution width.
+ * @param[in] conv_height Convolution height.
+ * @param[in] weights Accessor to get kernel weights from.
+ * @param[in] bias Accessor to get kernel bias from.
+ * @param[in] conv_info Padding and stride information.
+ * @param[in] depth_multiplier (Optional) Depth multiplier parameter.
+ * @param[in] weights_quant_info (Optional) Quantization info used for weights
+ * @param[in] out_quant_info (Optional) Output quantization info
*/
DepthwiseConvolutionLayer(unsigned int conv_width,
unsigned int conv_height,
ITensorAccessorUPtr weights,
ITensorAccessorUPtr bias,
PadStrideInfo conv_info,
- int depth_multiplier = 1,
- const QuantizationInfo quant_info = QuantizationInfo())
+ int depth_multiplier = 1,
+ const QuantizationInfo weights_quant_info = QuantizationInfo(),
+ const QuantizationInfo out_quant_info = QuantizationInfo())
: _conv_width(conv_width),
_conv_height(conv_height),
_conv_info(std::move(conv_info)),
_weights(std::move(weights)),
_bias(std::move(bias)),
_depth_multiplier(depth_multiplier),
- _quant_info(std::move(quant_info))
+ _weights_quant_info(std::move(weights_quant_info)),
+ _out_quant_info(std::move(out_quant_info))
{
}
@@ -446,7 +455,7 @@ public:
return GraphBuilder::add_depthwise_convolution_node(s.graph(), common_params,
input, Size2D(_conv_width, _conv_height), _conv_info, _depth_multiplier,
s.hints().depthwise_convolution_method_hint,
- std::move(_weights), std::move(_bias), std::move(_quant_info));
+ std::move(_weights), std::move(_bias), std::move(_weights_quant_info), std::move(_out_quant_info));
}
private:
@@ -456,7 +465,8 @@ private:
ITensorAccessorUPtr _weights;
ITensorAccessorUPtr _bias;
int _depth_multiplier;
- const QuantizationInfo _quant_info;
+ const QuantizationInfo _weights_quant_info;
+ const QuantizationInfo _out_quant_info;
};
/** DetectionOutput Layer */
class DetectionOutputLayer final : public ILayer
diff --git a/arm_compute/graph/nodes/ActivationLayerNode.h b/arm_compute/graph/nodes/ActivationLayerNode.h
index 723120655b..a17b0103e4 100644
--- a/arm_compute/graph/nodes/ActivationLayerNode.h
+++ b/arm_compute/graph/nodes/ActivationLayerNode.h
@@ -36,9 +36,11 @@ class ActivationLayerNode final : public INode
public:
/** Constructor
*
- * @param[in] info Activation Layer information
+ * @param[in] info Activation Layer information
+ * @param[in] out_quant_info (Optional) Output quantization info
*/
- ActivationLayerNode(ActivationLayerInfo info);
+ ActivationLayerNode(ActivationLayerInfo info,
+ QuantizationInfo out_quant_info = QuantizationInfo());
/** Activation metadata accessor
*
* @return The activation info of the layer
@@ -56,6 +58,7 @@ public:
private:
ActivationLayerInfo _info;
+ QuantizationInfo _out_quant_info;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/nodes/ConcatenateLayerNode.h b/arm_compute/graph/nodes/ConcatenateLayerNode.h
index 20c8523752..fc122845e8 100644
--- a/arm_compute/graph/nodes/ConcatenateLayerNode.h
+++ b/arm_compute/graph/nodes/ConcatenateLayerNode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,10 +36,10 @@ class ConcatenateLayerNode final : public INode
public:
/** Constructor
*
- * @param[in] total_nodes Number of nodes that will get concatenated
- * @param[in] axis Concatenation axis
+ * @param[in] total_nodes Number of nodes that will get concatenated
+ * @param[in] concat_descriptor Concatenate Layer Descriptor
*/
- ConcatenateLayerNode(unsigned int total_nodes, DataLayoutDimension axis);
+ ConcatenateLayerNode(unsigned int total_nodes, descriptors::ConcatLayerDescriptor concat_descriptor);
/** Computes concatenations output descriptor
*
* @param[in] input_descriptors Input descriptors
@@ -68,6 +68,12 @@ public:
*/
DataLayoutDimension concatenation_axis() const;
+ /** Concatenation output quantization info accessor
+ *
+ * @return Output quantization info
+ */
+ QuantizationInfo output_quantization_info() const;
+
// Inherited overridden methods:
NodeType type() const override;
bool forward_descriptors() override;
@@ -75,9 +81,9 @@ public:
void accept(INodeVisitor &v) override;
private:
- unsigned int _total_nodes;
- DataLayoutDimension _axis;
- bool _is_enabled;
+ unsigned int _total_nodes;
+ descriptors::ConcatLayerDescriptor _concat_descriptor;
+ bool _is_enabled;
};
} // namespace graph
} // namespace arm_compute
diff --git a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
index 8c0aae13c9..fd0273416e 100644
--- a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,10 +39,12 @@ public:
* @param[in] info Convolution layer attributes
* @param[in] depth_multiplier (Optional) Depth multiplier parameter.
* @param[in] method (Optional) Depthwise convolution method to use
+ * @param[in] out_quant_info (Optional) Output quantization info
*/
DepthwiseConvolutionLayerNode(PadStrideInfo info,
int depth_multiplier = 1,
- DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::Default);
+ DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::Default,
+ QuantizationInfo out_quant_info = QuantizationInfo());
/** Sets the depthwise convolution method to use
*
* @param[in] method Depthwise convolution method to use
@@ -103,6 +105,7 @@ private:
PadStrideInfo _info;
int _depth_multiplier;
DepthwiseConvolutionMethod _method;
+ QuantizationInfo _out_quant_info;
ActivationLayerInfo _fused_activation;
};
} // namespace graph