author     Isabella Gottardi <isabella.gottardi@arm.com>  2019-03-14 10:32:11 +0000
committer  Isabella Gottardi <isabella.gottardi@arm.com>  2019-03-20 11:23:43 +0000
commit     0ae5de9124a0094e656244ad2f807c084966fc04 (patch)
tree       ab698ad9c43f95dda13f78cf76b753105cf69388 /src/graph/nodes
parent     b0c5037d94ba7073ccabb0ebaff54db320f184c4 (diff)
download   ComputeLibrary-0ae5de9124a0094e656244ad2f807c084966fc04.tar.gz
COMPMID-1995: Prepare Graph to support different input/output quantization info
- Added support for different input/output qinfo in ActivationLayer and DepthwiseConv
- Added support for different input/output qinfo in ConcatenateLayer, introducing ConcatDescriptor
- Added reshape validate
- Allow OutputLayer to return a specific connection index from the input
- Do not run the Inplace and Depth mutators when input and output quantization info differ

Change-Id: I03f5e416fc43ddd284e1501887202a3145f76d8a
Signed-off-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-on: https://review.mlplatform.org/c/852
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
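All three node changes in this patch follow the same pattern: the node stores an optional output QuantizationInfo and, in configure_output(), copies the descriptor it would normally propagate but overrides its quant_info whenever that override is non-empty. Below is a minimal, standalone sketch of that pattern; SimpleQuantInfo and SimpleTensorDescriptor are simplified stand-ins for illustration only, not the library's real types.

    #include <iostream>

    // Simplified stand-ins for the library types, for illustration only.
    struct SimpleQuantInfo
    {
        float scale;
        int   offset;
        bool  empty() const { return scale == 0.f; } // simplified emptiness check
    };

    struct SimpleTensorDescriptor
    {
        SimpleQuantInfo quant_info;
    };

    // Pattern used by the three nodes in this patch: copy the computed descriptor,
    // then override its quantization info only if an explicit one was provided.
    SimpleTensorDescriptor configure_output(const SimpleTensorDescriptor &src_desc,
                                            const SimpleQuantInfo        &out_quant_info)
    {
        SimpleTensorDescriptor output_info = src_desc;
        if(!out_quant_info.empty())
        {
            output_info.quant_info = out_quant_info;
        }
        return output_info;
    }

    int main()
    {
        SimpleTensorDescriptor src{ SimpleQuantInfo{ 0.5f, 10 } };
        SimpleTensorDescriptor dst = configure_output(src, SimpleQuantInfo{ 0.25f, 0 });
        std::cout << dst.quant_info.scale << ", " << dst.quant_info.offset << std::endl; // prints: 0.25, 0
        return 0;
    }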
Diffstat (limited to 'src/graph/nodes')
-rw-r--r--  src/graph/nodes/ActivationLayerNode.cpp            | 12
-rw-r--r--  src/graph/nodes/ConcatenateLayerNode.cpp           | 17
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayerNode.cpp  | 15
3 files changed, 33 insertions, 11 deletions
diff --git a/src/graph/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
index 85cb10bbdb..ada6cf981f 100644
--- a/src/graph/nodes/ActivationLayerNode.cpp
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -30,8 +30,8 @@ namespace arm_compute
{
namespace graph
{
-ActivationLayerNode::ActivationLayerNode(ActivationLayerInfo info)
- : _info(info)
+ActivationLayerNode::ActivationLayerNode(ActivationLayerInfo info, QuantizationInfo out_quant_info)
+ : _info(info), _out_quant_info(out_quant_info)
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -62,7 +62,13 @@ TensorDescriptor ActivationLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
- return src->desc();
+ TensorDescriptor output_info = src->desc();
+ if(!_out_quant_info.empty())
+ {
+ output_info.quant_info = _out_quant_info;
+ }
+
+ return output_info;
}
NodeType ActivationLayerNode::type() const
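With the new constructor above, an activation node can be given an explicit output quantization at construction time. A hedged sketch of direct construction follows; the header paths, the RELU choice and the scale/offset values are illustrative rather than taken from the patch, and in practice nodes are normally created through the Graph/GraphBuilder front end rather than directly.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/graph/nodes/ActivationLayerNode.h"

    using namespace arm_compute;
    using namespace arm_compute::graph;

    // Illustrative only: construct the node with an explicit output quantization.
    void build_requantizing_activation()
    {
        ActivationLayerNode node(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                 QuantizationInfo(0.25f, 10)); // placeholder scale/offset
        // Once the node's single input edge is connected inside a Graph,
        // configure_output(0) copies the input descriptor but reports the
        // QuantizationInfo given here instead of the input's.
    }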
diff --git a/src/graph/nodes/ConcatenateLayerNode.cpp b/src/graph/nodes/ConcatenateLayerNode.cpp
index 3ce09d0073..bbdc4dc029 100644
--- a/src/graph/nodes/ConcatenateLayerNode.cpp
+++ b/src/graph/nodes/ConcatenateLayerNode.cpp
@@ -34,8 +34,8 @@ namespace arm_compute
{
namespace graph
{
-ConcatenateLayerNode::ConcatenateLayerNode(unsigned int total_nodes, DataLayoutDimension axis)
- : _total_nodes(total_nodes), _axis(axis), _is_enabled(true)
+ConcatenateLayerNode::ConcatenateLayerNode(unsigned int total_nodes, descriptors::ConcatLayerDescriptor concat_descriptor)
+ : _total_nodes(total_nodes), _concat_descriptor(std::move(concat_descriptor)), _is_enabled(true)
{
_input_edges.resize(_total_nodes, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -53,7 +53,12 @@ bool ConcatenateLayerNode::is_enabled() const
DataLayoutDimension ConcatenateLayerNode::concatenation_axis() const
{
- return _axis;
+ return _concat_descriptor.axis;
+}
+
+QuantizationInfo ConcatenateLayerNode::output_quantization_info() const
+{
+ return _concat_descriptor.output_qinfo;
}
TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors,
@@ -121,7 +126,11 @@ TensorDescriptor ConcatenateLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(t == nullptr);
inputs_descriptors.push_back(t->desc());
}
- output_info = compute_output_descriptor(inputs_descriptors, _axis);
+ output_info = compute_output_descriptor(inputs_descriptors, _concat_descriptor.axis);
+ if(!_concat_descriptor.output_qinfo.empty())
+ {
+ output_info.quant_info = _concat_descriptor.output_qinfo;
+ }
}
return output_info;
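The diff confirms that ConcatLayerDescriptor carries the concatenation axis and an output_qinfo. The sketch below assumes the descriptor exposes an (axis, output_qinfo) constructor and lives in arm_compute/graph/LayerDescriptors.h; neither detail is shown in this diff, and the axis and quantization values are placeholders.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/graph/LayerDescriptors.h"
    #include "arm_compute/graph/nodes/ConcatenateLayerNode.h"

    using namespace arm_compute;
    using namespace arm_compute::graph;

    // Illustrative only: concatenate four inputs along the channel dimension and
    // force the output tensor to a common quantization.
    void build_requantizing_concat()
    {
        descriptors::ConcatLayerDescriptor desc(DataLayoutDimension::CHANNEL, // axis
                                                QuantizationInfo(0.5f, 0));   // output_qinfo (placeholder)
        ConcatenateLayerNode node(4 /* total_nodes */, desc);
        // configure_output(0) concatenates the input descriptors along desc.axis and,
        // because desc.output_qinfo is non-empty, overrides the result's quant_info.
    }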
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index 75ca5f4e03..935902d3fd 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,8 +32,9 @@ namespace arm_compute
{
namespace graph
{
-DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, int depth_multiplier, DepthwiseConvolutionMethod method)
- : _info(std::move(info)), _depth_multiplier(depth_multiplier), _method(method), _fused_activation()
+DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, int depth_multiplier, DepthwiseConvolutionMethod method,
+ QuantizationInfo out_quant_info)
+ : _info(std::move(info)), _depth_multiplier(depth_multiplier), _method(method), _out_quant_info(out_quant_info), _fused_activation()
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -113,7 +114,13 @@ TensorDescriptor DepthwiseConvolutionLayerNode::configure_output(size_t idx) con
ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
- return compute_output_descriptor(src->desc(), weights->desc(), _info, _depth_multiplier);
+ TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info, _depth_multiplier);
+ if(!_out_quant_info.empty())
+ {
+ output_info.quant_info = _out_quant_info;
+ }
+
+ return output_info;
}
NodeType DepthwiseConvolutionLayerNode::type() const
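The depthwise convolution node follows the same override pattern. Below is a hedged sketch of the extended constructor; the pad/stride, depth multiplier and quantization values are placeholders, and DepthwiseConvolutionMethod::Default is assumed to be the "let the backend decide" value.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/graph/Types.h"
    #include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"

    using namespace arm_compute;
    using namespace arm_compute::graph;

    // Illustrative only: stride 1, no padding, depth multiplier 1, and an explicit
    // output quantization that configure_output() will report instead of the one
    // computed from the input and weights descriptors.
    void build_requantizing_depthwise_conv()
    {
        DepthwiseConvolutionLayerNode node(PadStrideInfo(1, 1, 0, 0),
                                           1 /* depth_multiplier */,
                                           DepthwiseConvolutionMethod::Default,
                                           QuantizationInfo(0.125f, 5)); // placeholder scale/offset
    }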