author     Finn Williams <Finn.Williams@arm.com>     2021-03-22 17:51:06 +0000
committer  finn.williams <finn.williams@arm.com>     2021-04-07 16:42:38 +0000
commit     4422ceca976a88aac49b21808a43e465bc87a35e (patch)
tree       d4f7f3d86394f74b679c907ad3f7fc7f4537933f /src/armnn/layers/QuantizedLstmLayer.cpp
parent     b70ec417989490a2a72c66ecd6c737df1c094f4c (diff)
download   armnn-4422ceca976a88aac49b21808a43e465bc87a35e.tar.gz
Fix graph copy memory spike
* Change layer storage of ConstTensors to std::shared_ptr<ConstCpuTensorHandle>
* Change Clone to share ConstTensors rather than copy them
* Remove uses of the non-const GetTensor() call
* Reduce the scope of the non-optimized network in ExeNet so memory can be released after use

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: Ibb2c7309d12411d21405bd6024c76bcdf5404545
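Why sharing fixes the spike, as a minimal standalone C++ sketch (illustrative types only; ConstTensor, LayerBefore and LayerAfter are hypothetical stand-ins, not armnn classes): deep-copying every constant tensor while cloning a graph briefly holds two full copies of the weights, whereas copying a shared_ptr to an immutable tensor only bumps a reference count.

// Minimal, self-contained sketch - not the armnn API.
// It contrasts the old pattern (deep-copy per clone) with the new pattern
// (original and clone share one immutable tensor).
#include <iostream>
#include <memory>
#include <vector>

// Hypothetical stand-in for an immutable weight tensor
// (analogous in spirit to ConstCpuTensorHandle).
struct ConstTensor
{
    std::vector<float> data;
};

struct LayerBefore
{
    std::unique_ptr<ConstTensor> m_Weights;   // owning copy per layer

    LayerBefore Clone() const
    {
        LayerBefore copy;
        // Old pattern: every clone duplicates the full weight buffer, so
        // copying a graph temporarily doubles the weight memory.
        copy.m_Weights = m_Weights ? std::make_unique<ConstTensor>(*m_Weights) : nullptr;
        return copy;
    }
};

struct LayerAfter
{
    std::shared_ptr<const ConstTensor> m_Weights;   // shared, immutable

    LayerAfter Clone() const
    {
        LayerAfter copy;
        // New pattern: the clone shares the same immutable tensor, so no
        // extra weight memory is allocated while copying the graph.
        copy.m_Weights = m_Weights;
        return copy;
    }
};

int main()
{
    LayerAfter original;
    original.m_Weights =
        std::make_shared<const ConstTensor>(ConstTensor{std::vector<float>(1024, 0.5f)});

    LayerAfter clone = original.Clone();

    // Both layers point at the same buffer; only the reference count changed.
    std::cout << "shared buffer: " << std::boolalpha
              << (original.m_Weights.get() == clone.m_Weights.get()) << '\n';
}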
Diffstat (limited to 'src/armnn/layers/QuantizedLstmLayer.cpp')
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 578d9eb137..a1ff985abe 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -49,31 +49,31 @@ QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
     auto layer = CloneBase<QuantizedLstmLayer>(graph, GetName());
     layer->m_QuantizedLstmParameters.m_InputToInputWeights = m_QuantizedLstmParameters.m_InputToInputWeights ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToInputWeights) : nullptr;
+            m_QuantizedLstmParameters.m_InputToInputWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_InputToForgetWeights = m_QuantizedLstmParameters.m_InputToForgetWeights ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToForgetWeights) : nullptr;
+            m_QuantizedLstmParameters.m_InputToForgetWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_InputToCellWeights = m_QuantizedLstmParameters.m_InputToCellWeights ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToCellWeights) : nullptr;
+            m_QuantizedLstmParameters.m_InputToCellWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_InputToOutputWeights = m_QuantizedLstmParameters.m_InputToOutputWeights ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToOutputWeights) : nullptr;
+            m_QuantizedLstmParameters.m_InputToOutputWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = m_QuantizedLstmParameters.m_RecurrentToInputWeights ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToInputWeights) : nullptr;
+            m_QuantizedLstmParameters.m_RecurrentToInputWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = m_QuantizedLstmParameters.m_RecurrentToForgetWeights
-            ? std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToForgetWeights) : nullptr;
+            ? m_QuantizedLstmParameters.m_RecurrentToForgetWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = m_QuantizedLstmParameters.m_RecurrentToCellWeights ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToCellWeights) : nullptr;
+            m_QuantizedLstmParameters.m_RecurrentToCellWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = m_QuantizedLstmParameters.m_RecurrentToOutputWeights
-            ? std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToOutputWeights) : nullptr;
+            ? m_QuantizedLstmParameters.m_RecurrentToOutputWeights : nullptr;
     layer->m_QuantizedLstmParameters.m_InputGateBias = m_QuantizedLstmParameters.m_InputGateBias ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputGateBias) : nullptr;
+            m_QuantizedLstmParameters.m_InputGateBias : nullptr;
     layer->m_QuantizedLstmParameters.m_ForgetGateBias = m_QuantizedLstmParameters.m_ForgetGateBias ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_ForgetGateBias) : nullptr;
+            m_QuantizedLstmParameters.m_ForgetGateBias : nullptr;
     layer->m_QuantizedLstmParameters.m_CellBias = m_QuantizedLstmParameters.m_CellBias ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_CellBias) : nullptr;
+            m_QuantizedLstmParameters.m_CellBias : nullptr;
     layer->m_QuantizedLstmParameters.m_OutputGateBias = m_QuantizedLstmParameters.m_OutputGateBias ?
-            std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_OutputGateBias) : nullptr;
+            m_QuantizedLstmParameters.m_OutputGateBias : nullptr;
     return std::move(layer);
 }
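The plain member assignments above only compile because the companion header change (not shown on this page) moves the parameter storage to a shareable type. A hedged sketch, assuming declarations along the lines the commit message describes; the real definitions live in the QuantizedLstmLayer header and may differ in detail:

// Sketch only, assumed from the commit message bullet
// "Change layer storage of ConstTensors to std::shared_ptr<ConstCpuTensorHandle>".
#include <memory>

class ConstCpuTensorHandle;   // armnn type referenced by the diff; forward-declared here

struct QuantizedLstmParameters
{
    // Before: a uniquely owned copy per layer, which forced Clone() to deep-copy.
    // std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeights;

    // After: an immutable handle that clones can share by copying the shared_ptr.
    std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
    std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
    // ... the remaining weight and bias members follow the same pattern.
};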