path: root/src/armnn/Network.cpp
author     James Conroy <james.conroy@arm.com>            2019-07-17 11:27:46 +0100
committer  Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-07-24 10:40:13 +0100
commit     ee18dc8d1725f472850ab0c398fd7cbc4b850891 (patch)
tree       b57738b18781d512f5438ca5154652571393e4e8 /src/armnn/Network.cpp
parent     7b1845206d723a91aec811edaf7cb0cf832dfd25 (diff)
IVGCVSW-3469 Add front end for Quantized LSTM layer
* Added new layer QuantizedLstm (Android Q)
* Made necessary changes to APIs
* Added unit tests

Change-Id: I3b9f16b0e7e49f51932cf204c87cb7118798123a
Signed-off-by: James Conroy <james.conroy@arm.com>
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--  src/armnn/Network.cpp | 38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index a43800827f..2195c71735 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1458,6 +1458,44 @@ IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor
return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}
+IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
+                                                  const char* name)
+{
+    const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);
+
+    // InputToX weights
+    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
+    layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
+    layer->m_QuantizedLstmParameters.m_InputToCellWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
+    layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
+
+    // RecurrentToX weights
+    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
+    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
+    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
+    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
+
+    // Bias
+    layer->m_QuantizedLstmParameters.m_InputGateBias =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
+    layer->m_QuantizedLstmParameters.m_ForgetGateBias =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
+    layer->m_QuantizedLstmParameters.m_CellBias =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
+    layer->m_QuantizedLstmParameters.m_OutputGateBias =
+            std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());
+
+    return layer;
+}
+
void Network::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
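
For readers who want to see how this new front end would be driven from client code, here is a minimal, illustrative sketch. It assumes the matching INetwork::AddQuantizedLstmLayer entry point added elsewhere in this change, and that QuantizedLstmInputParams exposes const ConstTensor* members mirroring the Get* accessors used above; the function name, tensor shapes, quantization parameters and data below are placeholders, not values taken from the patch.

// Illustrative usage sketch (not part of this patch). Assumes
// QuantizedLstmInputParams holds const ConstTensor* members matching the
// Get* accessors above, and that INetwork exposes AddQuantizedLstmLayer.
#include <armnn/ArmNN.hpp>
#include <cstdint>
#include <vector>

armnn::IConnectableLayer* AddExampleQuantizedLstm(armnn::INetwork& network)
{
    using namespace armnn;

    const unsigned int inputSize  = 2;  // placeholder dimensions
    const unsigned int outputSize = 4;

    // 8-bit asymmetric quantized weights, 32-bit biases
    // (DataType enum names as of this 2019 code base).
    TensorInfo inputWeightsInfo({outputSize, inputSize}, DataType::QuantisedAsymm8, 0.1f, 0);
    TensorInfo recurrentWeightsInfo({outputSize, outputSize}, DataType::QuantisedAsymm8, 0.1f, 0);
    TensorInfo biasInfo({outputSize}, DataType::Signed32, 0.01f, 0);

    std::vector<uint8_t> inputWeightsData(outputSize * inputSize, 1);
    std::vector<uint8_t> recurrentWeightsData(outputSize * outputSize, 1);
    std::vector<int32_t> biasData(outputSize, 0);

    ConstTensor inputWeights(inputWeightsInfo, inputWeightsData);
    ConstTensor recurrentWeights(recurrentWeightsInfo, recurrentWeightsData);
    ConstTensor bias(biasInfo, biasData);

    // For brevity one dummy tensor is reused per gate; real code would pass
    // distinct trained weights and biases for each of the twelve parameters.
    QuantizedLstmInputParams params;
    params.m_InputToInputWeights      = &inputWeights;
    params.m_InputToForgetWeights     = &inputWeights;
    params.m_InputToCellWeights       = &inputWeights;
    params.m_InputToOutputWeights     = &inputWeights;
    params.m_RecurrentToInputWeights  = &recurrentWeights;
    params.m_RecurrentToForgetWeights = &recurrentWeights;
    params.m_RecurrentToCellWeights   = &recurrentWeights;
    params.m_RecurrentToOutputWeights = &recurrentWeights;
    params.m_InputGateBias            = &bias;
    params.m_ForgetGateBias           = &bias;
    params.m_CellBias                 = &bias;
    params.m_OutputGateBias           = &bias;

    return network.AddQuantizedLstmLayer(params, "QuantizedLstm");
}

Since AddQuantizedLstmLayer copies the weights into ScopedCpuTensorHandles, the local tensors above do not need to outlive the call. Wiring up the layer's input slots (input, previous cell state, previous output state) and output slots with the usual GetInputSlot/GetOutputSlot connections is omitted here.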