Diffstat (limited to 'arm_compute/graph/frontend/Layers.h')
-rw-r--r--  arm_compute/graph/frontend/Layers.h  |  33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index 02ef56952d..a222c8546e 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -381,14 +381,22 @@ class FullyConnectedLayer final : public ILayer
public:
/** Construct a fully connected layer.
*
- * @param[in] num_outputs Number of outputs.
- * @param[in] weights Accessor to get weights from.
- * @param[in] bias Accessor to get bias from.
+ * @param[in] num_outputs Number of outputs.
+ * @param[in] weights Accessor to get weights from.
+ * @param[in] bias Accessor to get bias from.
+ * @param[in] weights_quant_info (Optional) Weights quantization information
+ * @param[in] out_quant_info (Optional) Output quantization info
*/
- FullyConnectedLayer(unsigned int num_outputs,
- ITensorAccessorUPtr weights,
- ITensorAccessorUPtr bias)
- : _num_outputs(num_outputs), _weights(std::move(weights)), _bias(std::move(bias))
+ FullyConnectedLayer(unsigned int num_outputs,
+ ITensorAccessorUPtr weights,
+ ITensorAccessorUPtr bias,
+ const QuantizationInfo weights_quant_info = QuantizationInfo(),
+ const QuantizationInfo out_quant_info = QuantizationInfo())
+ : _num_outputs(num_outputs),
+ _weights(std::move(weights)),
+ _bias(std::move(bias)),
+ _weights_quant_info(std::move(weights_quant_info)),
+ _out_quant_info(std::move(out_quant_info))
{
}
@@ -397,13 +405,16 @@ public:
NodeParams common_params = { name(), s.hints().target_hint };
NodeIdxPair input = { s.tail_node(), 0 };
return GraphBuilder::add_fully_connected_layer(s.graph(), common_params, input, _num_outputs,
- std::move(_weights), std::move(_bias));
+ std::move(_weights), std::move(_bias),
+ std::move(_weights_quant_info), std::move(_out_quant_info));
}
private:
- unsigned int _num_outputs;
- ITensorAccessorUPtr _weights;
- ITensorAccessorUPtr _bias;
+ unsigned int _num_outputs;
+ ITensorAccessorUPtr _weights;
+ ITensorAccessorUPtr _bias;
+ const QuantizationInfo _weights_quant_info;
+ const QuantizationInfo _out_quant_info;
};
/** Normalization Layer */
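Below is a minimal usage sketch (not part of the commit) showing how a graph frontend client could pass the new optional quantization arguments to FullyConnectedLayer. It assumes the usual Stream-based frontend API; the DummyAccessor class, the tensor shape, and all scale/offset values are hypothetical placeholders rather than values taken from the patch.

// Minimal sketch (not part of the patch): feeding weights/output quantization info
// to the new FullyConnectedLayer overload through the graph frontend.
#include "arm_compute/graph.h"

#include <memory>

using namespace arm_compute;
using namespace arm_compute::graph::frontend;

// Hypothetical no-op accessor; a real application would fill the tensor with
// weights/bias data loaded from a file or an embedded blob.
class DummyAccessor final : public graph::ITensorAccessor
{
public:
    bool access_tensor(ITensor &tensor) override
    {
        (void)tensor; // leave contents untouched
        return true;
    }
};

int main()
{
    Stream net(0, "fc_quantized_sketch");

    // QASYMM8 input: a 2048-element feature vector with its own quantization info.
    const graph::TensorDescriptor input_descriptor(TensorShape(2048U), DataType::QASYMM8,
                                                   QuantizationInfo(0.05f, 128));

    net << graph::Target::NEON
        << InputLayer(input_descriptor, std::make_unique<DummyAccessor>())
        // New overload: quantization info for the weights and the output is passed
        // explicitly rather than left at the defaults.
        << FullyConnectedLayer(1000U,
                               std::make_unique<DummyAccessor>(), // weights
                               std::make_unique<DummyAccessor>(), // bias
                               QuantizationInfo(0.01f, 10),       // weights_quant_info
                               QuantizationInfo(0.1f, 20))        // out_quant_info
        << OutputLayer(std::make_unique<DummyAccessor>());

    net.finalize(graph::Target::NEON, graph::GraphConfig());
    net.run();

    return 0;
}

Because both new parameters default to QuantizationInfo(), existing call sites that construct FullyConnectedLayer with only num_outputs, weights and bias continue to compile unchanged.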