author     Francis Murtagh <francis.murtagh@arm.com>    2019-08-14 09:51:36 +0100
committer  Áron Virginás-Tar <aron.virginas-tar@arm.com>    2019-08-14 10:37:35 +0000
commit     bb590b42b6a877e7caf0c5e73070bab42f44c760 (patch)
tree       99b82801365a5fb25ea17725755244fb5f6f9692 /src/armnn/Network.cpp
parent     d65cb800d2c5acca3f31e9358fa5bfbe153e3aa9 (diff)
download   armnn-bb590b42b6a877e7caf0c5e73070bab42f44c760.tar.gz
IVGCVSW-3474 Refactor Lstm and QuantizedLstm Param Getters
* Change Getter Signatures to follow coding guidelines

Change-Id: Ic02621e834dbf79b9df63f8b4c6339f71651e944
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
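To illustrate the rename, here is a minimal, self-contained C++ sketch of the accessor style the commit moves to. The class and types below are simplified stand-ins modelled on QuantizedLstmInputParams (only one weight tensor is shown, and GetConstTensor is a hypothetical helper, not the real armnn declaration); the only details taken from the diff itself are the rename from get_InputToInputWeights() to GetInputToInputWeights() and its use inside AddQuantizedLstmLayer.

    // Sketch only: stand-in types rather than the real armnn headers.
    #include <stdexcept>

    struct ConstTensorStub {};  // stand-in for armnn::ConstTensor

    class QuantizedLstmInputParamsSketch
    {
    public:
        const ConstTensorStub* m_InputToInputWeights = nullptr;

        // Old accessor style, removed by this commit:
        //     const ConstTensorStub& get_InputToInputWeights() const;
        //
        // New accessor style, PascalCase per the coding guidelines:
        const ConstTensorStub& GetInputToInputWeights() const
        {
            return GetConstTensor(m_InputToInputWeights);
        }

    private:
        // Hypothetical helper: dereference only if the tensor was supplied.
        static const ConstTensorStub& GetConstTensor(const ConstTensorStub* tensor)
        {
            if (tensor == nullptr)
            {
                throw std::invalid_argument("tensor pointer is null");
            }
            return *tensor;
        }
    };

Call sites in Network.cpp, such as the hunk below, then switch from params.get_X() to params.GetX() with no other behavioural change.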
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--    src/armnn/Network.cpp    24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index b30cd9f3c2..932f9eb49e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1468,33 +1468,33 @@ IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams
     // InputToX weights
     layer->m_QuantizedLstmParameters.m_InputToInputWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_InputToInputWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
     layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_InputToForgetWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
     layer->m_QuantizedLstmParameters.m_InputToCellWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_InputToCellWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
     layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_InputToOutputWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
     // RecurrentToX weights
     layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_RecurrentToInputWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
     layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_RecurrentToForgetWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
     layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_RecurrentToCellWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
     layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_RecurrentToOutputWeights());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
     // Bias
     layer->m_QuantizedLstmParameters.m_InputGateBias =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_InputGateBias());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetInputGateBias());
     layer->m_QuantizedLstmParameters.m_ForgetGateBias =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_ForgetGateBias());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetForgetGateBias());
     layer->m_QuantizedLstmParameters.m_CellBias =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_CellBias());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetCellBias());
     layer->m_QuantizedLstmParameters.m_OutputGateBias =
-            std::make_unique<ScopedCpuTensorHandle>(params.get_OutputGateBias());
+            std::make_unique<ScopedCpuTensorHandle>(params.GetOutputGateBias());
     return layer;
 }