aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/layers
diff options
context:
space:
mode:
authorFinn Williams <Finn.Williams@arm.com>2021-03-22 17:51:06 +0000
committerfinn.williams <finn.williams@arm.com>2021-04-07 16:42:38 +0000
commit4422ceca976a88aac49b21808a43e465bc87a35e (patch)
treed4f7f3d86394f74b679c907ad3f7fc7f4537933f /src/armnn/layers
parentb70ec417989490a2a72c66ecd6c737df1c094f4c (diff)
downloadarmnn-4422ceca976a88aac49b21808a43e465bc87a35e.tar.gz
Fix graph copy memory spike
* Change layer storage of ConstTensors to std::shared_ptr<ConstCpuTensorHandle>
* Change clone to share ConstTensor rather than copy
* Remove uses of non-const GetTensor() call
* Reduce scope of non-optimized network in ExeNet, so memory can be released after use

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: Ibb2c7309d12411d21405bd6024c76bcdf5404545
Diffstat (limited to 'src/armnn/layers')
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp8
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.hpp8
-rw-r--r--src/armnn/layers/ConstantLayer.cpp2
-rw-r--r--src/armnn/layers/ConstantLayer.hpp2
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/Convolution2dLayer.hpp4
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.hpp4
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.cpp2
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.hpp2
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp4
-rw-r--r--src/armnn/layers/FullyConnectedLayer.hpp4
-rw-r--r--src/armnn/layers/LstmLayer.cpp42
-rw-r--r--src/armnn/layers/LstmLayer.hpp42
-rw-r--r--src/armnn/layers/QLstmLayer.cpp42
-rw-r--r--src/armnn/layers/QLstmLayer.hpp42
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.cpp24
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.hpp24
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.hpp4
20 files changed, 136 insertions, 136 deletions
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 6df5195a55..680d9e56a0 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -41,10 +41,10 @@ BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<BatchNormalizationLayer>(graph, m_Param, GetName());
- layer->m_Mean = m_Mean ? std::make_unique<ScopedCpuTensorHandle>(*m_Mean) : nullptr;
- layer->m_Variance = m_Variance ? std::make_unique<ScopedCpuTensorHandle>(*m_Variance) : nullptr;
- layer->m_Beta = m_Beta ? std::make_unique<ScopedCpuTensorHandle>(*m_Beta) : nullptr;
- layer->m_Gamma = m_Gamma ? std::make_unique<ScopedCpuTensorHandle>(*m_Gamma) : nullptr;
+ layer->m_Mean = m_Mean ? m_Mean : nullptr;
+ layer->m_Variance = m_Variance ? m_Variance : nullptr;
+ layer->m_Beta = m_Beta ? m_Beta : nullptr;
+ layer->m_Gamma = m_Gamma ? m_Gamma : nullptr;
return std::move(layer);
}
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index dab75d1e12..bf9e4b7917 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -16,13 +16,13 @@ class BatchNormalizationLayer : public LayerWithParameters<BatchNormalizationDes
{
public:
/// A unique pointer to store Mean values
- std::unique_ptr<ScopedCpuTensorHandle> m_Mean;
+ std::shared_ptr<ConstCpuTensorHandle> m_Mean;
/// A unique pointer to store Variance values
- std::unique_ptr<ScopedCpuTensorHandle> m_Variance;
+ std::shared_ptr<ConstCpuTensorHandle> m_Variance;
/// A unique pointer to store Beta values
- std::unique_ptr<ScopedCpuTensorHandle> m_Beta;
+ std::shared_ptr<ConstCpuTensorHandle> m_Beta;
/// A unique pointer to store Gamma values
- std::unique_ptr<ScopedCpuTensorHandle> m_Gamma;
+ std::shared_ptr<ConstCpuTensorHandle> m_Gamma;
/// Makes a workload for the BatchNormalization type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 31e9e974cf..8ae34b6709 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -32,7 +32,7 @@ ConstantLayer* ConstantLayer::Clone(Graph& graph) const
// Cloned layers share the same layer output object.
auto layer = CloneBase<ConstantLayer>(graph, GetName());
- layer->m_LayerOutput = m_LayerOutput ? std::make_unique<ScopedCpuTensorHandle>(*m_LayerOutput) : nullptr;
+ layer->m_LayerOutput = m_LayerOutput ? m_LayerOutput : nullptr;
return std::move(layer);
}
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index 9d91551df9..ff4c03775f 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -43,7 +43,7 @@ public:
void ExecuteStrategy(IStrategy& strategy) const override;
- std::unique_ptr<ScopedCpuTensorHandle> m_LayerOutput;
+ std::shared_ptr<ConstCpuTensorHandle> m_LayerOutput;
protected:
/// Constructor to create a ConstantLayer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 0c3040ea6e..cf7cf0f129 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -70,11 +70,11 @@ Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<Convolution2dLayer>(graph, m_Param, GetName());
- layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
+ layer->m_Weight = m_Weight ? m_Weight : nullptr;
if (layer->m_Param.m_BiasEnabled)
{
- layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
+ layer->m_Bias = m_Bias ? m_Bias : nullptr;
}
return std::move(layer);
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 440c80dfa9..2d5ab194de 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -17,9 +17,9 @@ class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
public:
/// A unique pointer to store Weight values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstCpuTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstCpuTensorHandle> m_Bias;
/// Makes a workload for the Convolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 1871b7d15d..0b2114a196 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -71,11 +71,11 @@ std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWo
DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<DepthwiseConvolution2dLayer>(graph, m_Param, GetName());
- layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
+ layer->m_Weight = m_Weight ? m_Weight : nullptr;
if (layer->m_Param.m_BiasEnabled)
{
- layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
+ layer->m_Bias = m_Bias ? m_Bias : nullptr;
}
return std::move(layer);
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index 7388cbcd8e..c83aa434d5 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -16,9 +16,9 @@ class DepthwiseConvolution2dLayer : public LayerWithParameters<DepthwiseConvolut
{
public:
/// A unique pointer to store Weight values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstCpuTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstCpuTensorHandle> m_Bias;
/// Makes a workload for the DepthwiseConvolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 356377a2f5..e5bbeca424 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -32,7 +32,7 @@ std::unique_ptr<IWorkload> DetectionPostProcessLayer::CreateWorkload(const armnn
DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<DetectionPostProcessLayer>(graph, m_Param, GetName());
- layer->m_Anchors = m_Anchors ? std::make_unique<ScopedCpuTensorHandle>(*m_Anchors) : nullptr;
+ layer->m_Anchors = m_Anchors ? m_Anchors : nullptr;
return std::move(layer);
}
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index b0d58589b4..e40966a19c 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -17,7 +17,7 @@ class DetectionPostProcessLayer : public LayerWithParameters<DetectionPostProces
{
public:
/// A unique pointer to store Anchor values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Anchors;
+ std::shared_ptr<ConstCpuTensorHandle> m_Anchors;
/// Makes a workload for the DetectionPostProcess type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 6d0b57a84c..44c8920136 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -44,11 +44,11 @@ FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
auto layer = CloneBase<FullyConnectedLayer>(graph, m_Param, GetName());
if (m_Param.m_ConstantWeights)
{
- layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
+ layer->m_Weight = m_Weight ? m_Weight : nullptr;
if (layer->m_Param.m_BiasEnabled)
{
- layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
+ layer->m_Bias = m_Bias ? m_Bias : nullptr;
}
}
return std::move(layer);
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index 4a9cbe1136..c45b081c85 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -16,9 +16,9 @@ class FullyConnectedLayer : public LayerWithParameters<FullyConnectedDescriptor>
{
public:
/// A unique pointer to store Weight values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstCpuTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstCpuTensorHandle> m_Bias;
/// Makes a workload for the FullyConnected type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index ebc408a636..0eeb2f8eab 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -82,41 +82,41 @@ LstmLayer* LstmLayer::Clone(Graph& graph) const
auto layer = CloneBase<LstmLayer>(graph, m_Param, GetName());
layer->m_BasicParameters.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToForgetWeights)
+ m_BasicParameters.m_InputToForgetWeights
: nullptr;
layer->m_BasicParameters.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToCellWeights) : nullptr;
+ m_BasicParameters.m_InputToCellWeights : nullptr;
layer->m_BasicParameters.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToOutputWeights) : nullptr;
+ m_BasicParameters.m_InputToOutputWeights : nullptr;
layer->m_BasicParameters.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToForgetWeights) : nullptr;
+ m_BasicParameters.m_RecurrentToForgetWeights : nullptr;
layer->m_BasicParameters.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToCellWeights) : nullptr;
+ m_BasicParameters.m_RecurrentToCellWeights : nullptr;
layer->m_BasicParameters.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToOutputWeights) : nullptr;
+ m_BasicParameters.m_RecurrentToOutputWeights : nullptr;
layer->m_BasicParameters.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_ForgetGateBias) : nullptr;
+ m_BasicParameters.m_ForgetGateBias : nullptr;
layer->m_BasicParameters.m_CellBias = m_BasicParameters.m_CellBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_CellBias) : nullptr;
+ m_BasicParameters.m_CellBias : nullptr;
layer->m_BasicParameters.m_OutputGateBias = m_BasicParameters.m_OutputGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_OutputGateBias) : nullptr;
+ m_BasicParameters.m_OutputGateBias : nullptr;
if (!m_Param.m_CifgEnabled)
{
layer->m_CifgParameters.m_InputToInputWeights = m_CifgParameters.m_InputToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_InputToInputWeights) : nullptr;
+ m_CifgParameters.m_InputToInputWeights : nullptr;
layer->m_CifgParameters.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_RecurrentToInputWeights) : nullptr;
+ m_CifgParameters.m_RecurrentToInputWeights : nullptr;
layer->m_CifgParameters.m_InputGateBias = m_CifgParameters.m_InputGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_InputGateBias) : nullptr;
+ m_CifgParameters.m_InputGateBias : nullptr;
}
if (m_Param.m_ProjectionEnabled)
{
layer->m_ProjectionParameters.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_ProjectionParameters.m_ProjectionWeights) : nullptr;
+ m_ProjectionParameters.m_ProjectionWeights : nullptr;
layer->m_ProjectionParameters.m_ProjectionBias = m_ProjectionParameters.m_ProjectionBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_ProjectionParameters.m_ProjectionBias) : nullptr;
+ m_ProjectionParameters.m_ProjectionBias : nullptr;
}
if (m_Param.m_PeepholeEnabled)
@@ -124,24 +124,24 @@ LstmLayer* LstmLayer::Clone(Graph& graph) const
if (!m_Param.m_CifgEnabled)
{
layer->m_PeepholeParameters.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToInputWeights) : nullptr;
+ m_PeepholeParameters.m_CellToInputWeights : nullptr;
}
layer->m_PeepholeParameters.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToForgetWeights) : nullptr;
+ m_PeepholeParameters.m_CellToForgetWeights : nullptr;
layer->m_PeepholeParameters.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToOutputWeights) : nullptr;
+ m_PeepholeParameters.m_CellToOutputWeights : nullptr;
}
if (m_Param.m_LayerNormEnabled)
{
layer->m_LayerNormParameters.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_InputLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_InputLayerNormWeights : nullptr;
layer->m_LayerNormParameters.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_ForgetLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_ForgetLayerNormWeights : nullptr;
layer->m_LayerNormParameters.m_CellLayerNormWeights = m_LayerNormParameters.m_CellLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_CellLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_CellLayerNormWeights : nullptr;
layer->m_LayerNormParameters.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_OutputLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_OutputLayerNormWeights : nullptr;
}
return std::move(layer);
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 30f952e276..80b57a88f7 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -14,63 +14,63 @@ class ScopedCpuTensorHandle;
struct LstmOptLayerNormParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_InputLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_ForgetLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_CellLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_OutputLayerNormWeights;
};
struct LstmOptCifgParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
};
struct LstmOptProjectionParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_ProjectionWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [output_size].
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_ProjectionBias;
};
struct LstmOptPeepholeParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellToForgetWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellToOutputWeights;
};
struct LstmBasicParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
};
/// This layer represents a LSTM operation.
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index d957bbb485..16aa718eb9 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -83,68 +83,68 @@ QLstmLayer* QLstmLayer::Clone(Graph& graph) const
auto layer = CloneBase<QLstmLayer>(graph, m_Param, GetName());
layer->m_BasicParameters.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToForgetWeights) : nullptr;
+ m_BasicParameters.m_InputToForgetWeights : nullptr;
layer->m_BasicParameters.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToCellWeights) : nullptr;
+ m_BasicParameters.m_InputToCellWeights : nullptr;
layer->m_BasicParameters.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_InputToOutputWeights) : nullptr;
+ m_BasicParameters.m_InputToOutputWeights : nullptr;
layer->m_BasicParameters.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToForgetWeights) : nullptr;
+ m_BasicParameters.m_RecurrentToForgetWeights : nullptr;
layer->m_BasicParameters.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToCellWeights) : nullptr;
+ m_BasicParameters.m_RecurrentToCellWeights : nullptr;
layer->m_BasicParameters.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_RecurrentToOutputWeights) : nullptr;
+ m_BasicParameters.m_RecurrentToOutputWeights : nullptr;
layer->m_BasicParameters.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_ForgetGateBias) : nullptr;
+ m_BasicParameters.m_ForgetGateBias : nullptr;
layer->m_BasicParameters.m_CellBias = m_BasicParameters.m_CellBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_CellBias) : nullptr;
+ m_BasicParameters.m_CellBias : nullptr;
layer->m_BasicParameters.m_OutputGateBias = m_BasicParameters.m_OutputGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_BasicParameters.m_OutputGateBias) : nullptr;
+ m_BasicParameters.m_OutputGateBias : nullptr;
if (!m_Param.m_CifgEnabled)
{
layer->m_CifgParameters.m_InputToInputWeights = m_CifgParameters.m_InputToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_InputToInputWeights) : nullptr;
+ m_CifgParameters.m_InputToInputWeights : nullptr;
layer->m_CifgParameters.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_RecurrentToInputWeights) : nullptr;
+ m_CifgParameters.m_RecurrentToInputWeights : nullptr;
layer->m_CifgParameters.m_InputGateBias = m_CifgParameters.m_InputGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_CifgParameters.m_InputGateBias) : nullptr;
+ m_CifgParameters.m_InputGateBias : nullptr;
}
if (m_Param.m_ProjectionEnabled)
{
layer->m_ProjectionParameters.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_ProjectionParameters.m_ProjectionWeights) : nullptr;
+ m_ProjectionParameters.m_ProjectionWeights : nullptr;
layer->m_ProjectionParameters.m_ProjectionBias = m_ProjectionParameters.m_ProjectionBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_ProjectionParameters.m_ProjectionBias) : nullptr;
+ m_ProjectionParameters.m_ProjectionBias : nullptr;
}
if (m_Param.m_PeepholeEnabled)
{
if (!m_Param.m_CifgEnabled) {
layer->m_PeepholeParameters.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToInputWeights) : nullptr;
+ m_PeepholeParameters.m_CellToInputWeights : nullptr;
}
layer->m_PeepholeParameters.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToForgetWeights) : nullptr;
+ m_PeepholeParameters.m_CellToForgetWeights : nullptr;
layer->m_PeepholeParameters.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_PeepholeParameters.m_CellToOutputWeights) : nullptr;
+ m_PeepholeParameters.m_CellToOutputWeights : nullptr;
}
if (m_Param.m_LayerNormEnabled)
{
if (!m_Param.m_CifgEnabled) {
layer->m_LayerNormParameters.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_InputLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_InputLayerNormWeights : nullptr;
}
layer->m_LayerNormParameters.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_ForgetLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_ForgetLayerNormWeights : nullptr;
layer->m_LayerNormParameters.m_CellLayerNormWeights = m_LayerNormParameters.m_CellLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_CellLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_CellLayerNormWeights : nullptr;
layer->m_LayerNormParameters.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_LayerNormParameters.m_OutputLayerNormWeights) : nullptr;
+ m_LayerNormParameters.m_OutputLayerNormWeights : nullptr;
}
return std::move(layer);
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 70cc4f2b15..09a020dc1d 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -14,65 +14,65 @@ class ScopedCpuTensorHandle;
struct QLstmBasicParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
};
struct QLstmOptProjectionParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_ProjectionWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [output_size] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_ProjectionBias;
};
struct QLstmOptPeepholeParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellToForgetWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellToOutputWeights;
};
struct QLstmOptCifgParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units] (QSymmS8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
};
struct QLstmOptLayerNormParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_ForgetLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::unique_ptr<ScopedCpuTensorHandle> m_CellLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputLayerNormWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_OutputLayerNormWeights;
};
/// This layer represents a QLstm operation.
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 578d9eb137..a1ff985abe 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -49,31 +49,31 @@ QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
auto layer = CloneBase<QuantizedLstmLayer>(graph, GetName());
layer->m_QuantizedLstmParameters.m_InputToInputWeights = m_QuantizedLstmParameters.m_InputToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToInputWeights) : nullptr;
+ m_QuantizedLstmParameters.m_InputToInputWeights : nullptr;
layer->m_QuantizedLstmParameters.m_InputToForgetWeights = m_QuantizedLstmParameters.m_InputToForgetWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToForgetWeights) : nullptr;
+ m_QuantizedLstmParameters.m_InputToForgetWeights : nullptr;
layer->m_QuantizedLstmParameters.m_InputToCellWeights = m_QuantizedLstmParameters.m_InputToCellWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToCellWeights) : nullptr;
+ m_QuantizedLstmParameters.m_InputToCellWeights : nullptr;
layer->m_QuantizedLstmParameters.m_InputToOutputWeights = m_QuantizedLstmParameters.m_InputToOutputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToOutputWeights) : nullptr;
+ m_QuantizedLstmParameters.m_InputToOutputWeights : nullptr;
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = m_QuantizedLstmParameters.m_RecurrentToInputWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToInputWeights) : nullptr;
+ m_QuantizedLstmParameters.m_RecurrentToInputWeights : nullptr;
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = m_QuantizedLstmParameters.m_RecurrentToForgetWeights
- ? std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToForgetWeights) : nullptr;
+ ? m_QuantizedLstmParameters.m_RecurrentToForgetWeights : nullptr;
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = m_QuantizedLstmParameters.m_RecurrentToCellWeights ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToCellWeights) : nullptr;
+ m_QuantizedLstmParameters.m_RecurrentToCellWeights : nullptr;
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = m_QuantizedLstmParameters.m_RecurrentToOutputWeights
- ? std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToOutputWeights) : nullptr;
+ ? m_QuantizedLstmParameters.m_RecurrentToOutputWeights : nullptr;
layer->m_QuantizedLstmParameters.m_InputGateBias = m_QuantizedLstmParameters.m_InputGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputGateBias) : nullptr;
+ m_QuantizedLstmParameters.m_InputGateBias : nullptr;
layer->m_QuantizedLstmParameters.m_ForgetGateBias = m_QuantizedLstmParameters.m_ForgetGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_ForgetGateBias) : nullptr;
+ m_QuantizedLstmParameters.m_ForgetGateBias : nullptr;
layer->m_QuantizedLstmParameters.m_CellBias = m_QuantizedLstmParameters.m_CellBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_CellBias) : nullptr;
+ m_QuantizedLstmParameters.m_CellBias : nullptr;
layer->m_QuantizedLstmParameters.m_OutputGateBias = m_QuantizedLstmParameters.m_OutputGateBias ?
- std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_OutputGateBias) : nullptr;
+ m_QuantizedLstmParameters.m_OutputGateBias : nullptr;
return std::move(layer);
}
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 544acbd816..ca97a6bb65 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -14,31 +14,31 @@ class ScopedCpuTensorHandle;
struct QuantizedLstmParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
};
/// This layer represents a QuantizedLstm operation.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index bd8cb096e2..8f6908ea5d 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -44,11 +44,11 @@ TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) co
{
auto layer = CloneBase<TransposeConvolution2dLayer>(graph, m_Param, GetName());
- layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
+ layer->m_Weight = m_Weight ? m_Weight : nullptr;
if (layer->m_Param.m_BiasEnabled)
{
- layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
+ layer->m_Bias = m_Bias ? m_Bias : nullptr;
}
return std::move(layer);
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 903c957393..53e73491d6 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -16,9 +16,9 @@ class TransposeConvolution2dLayer : public LayerWithParameters<TransposeConvolut
{
public:
/// A unique pointer to store weight values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstCpuTensorHandle> m_Weight;
/// A unique pointer to store bias values.
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstCpuTensorHandle> m_Bias;
/// Makes a workload for the TransposeConvolution2d type.
/// @param [in] graph The graph where this layer can be found.