about summary refs log tree commit diff
path: root/src/armnn/Network.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--  src/armnn/Network.cpp  140
1 file changed, 70 insertions(+), 70 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c99690e4a6..b79576c87e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -12,7 +12,7 @@
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
@@ -671,7 +671,7 @@ LayerT* ConvertBf16ToFp32Weight(Layer* l)
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+ layer->m_Weight.reset(new ScopedTensorHandle(newInput));
}
}
return layer;
@@ -1752,10 +1752,10 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedD
if (fullyConnectedDescriptor.m_ConstantWeights)
{
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights.value());
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights.value());
if (fullyConnectedDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
}
@@ -1816,11 +1816,11 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDes
const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (convolution2dDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -1864,11 +1864,11 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (convolution2dDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -1913,7 +1913,7 @@ IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::Detect
{
const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
- layer->m_Anchors = std::make_shared<ScopedCpuTensorHandle>(anchors);
+ layer->m_Anchors = std::make_shared<ScopedTensorHandle>(anchors);
return layer;
}
@@ -2011,10 +2011,10 @@ IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizat
{
const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
- layer->m_Mean = std::make_shared<ScopedCpuTensorHandle>(mean);
- layer->m_Variance = std::make_shared<ScopedCpuTensorHandle>(variance);
- layer->m_Beta = std::make_shared<ScopedCpuTensorHandle>(beta);
- layer->m_Gamma = std::make_shared<ScopedCpuTensorHandle>(gamma);
+ layer->m_Mean = std::make_shared<ScopedTensorHandle>(mean);
+ layer->m_Variance = std::make_shared<ScopedTensorHandle>(variance);
+ layer->m_Beta = std::make_shared<ScopedTensorHandle>(beta);
+ layer->m_Gamma = std::make_shared<ScopedTensorHandle>(gamma);
return layer;
}
@@ -2071,7 +2071,7 @@ IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const
{
auto layer = m_Graph->AddLayer<ConstantLayer>(name);
- layer->m_LayerOutput = std::make_shared<ScopedCpuTensorHandle>(input);
+ layer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(input);
return layer;
}
@@ -2107,23 +2107,23 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
//Lstm Basic Parameters
layer->m_BasicParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
layer->m_BasicParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
layer->m_BasicParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
layer->m_BasicParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
layer->m_BasicParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
//Lstm Cifg parameters
if(!descriptor.m_CifgEnabled)
@@ -2145,11 +2145,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when CIFG is disabled.");
}
layer->m_CifgParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
layer->m_CifgParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
layer->m_CifgParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
}
//Lstm projection parameters
@@ -2161,11 +2161,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when projection is enabled.");
}
layer->m_ProjectionParameters.m_ProjectionWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
if(params.m_ProjectionBias != nullptr)
{
layer->m_ProjectionParameters.m_ProjectionBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
}
}
@@ -2181,7 +2181,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
}
layer->m_PeepholeParameters.m_CellToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
}
if(params.m_CellToForgetWeights == nullptr)
@@ -2196,9 +2196,9 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
}
layer->m_PeepholeParameters.m_CellToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
layer->m_PeepholeParameters.m_CellToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
}
//Lstm Layer Normalization params
@@ -2212,7 +2212,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when layer normalization is enabled and CIFG disabled.");
}
layer->m_LayerNormParameters.m_InputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
}
if(params.m_ForgetLayerNormWeights == nullptr)
@@ -2231,11 +2231,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when layer normalization is enabled.");
}
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
}
return layer;
}
@@ -2330,11 +2330,11 @@ IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeCo
const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (descriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -2366,33 +2366,33 @@ IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputPa
// InputToX weights
layer->m_QuantizedLstmParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToInputWeights());
layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToForgetWeights());
layer->m_QuantizedLstmParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToCellWeights());
layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToOutputWeights());
// RecurrentToX weights
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToInputWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToForgetWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToCellWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToOutputWeights());
// Bias
layer->m_QuantizedLstmParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetInputGateBias());
layer->m_QuantizedLstmParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetForgetGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetForgetGateBias());
layer->m_QuantizedLstmParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetCellBias());
+ std::make_shared<ScopedTensorHandle>(params.GetCellBias());
layer->m_QuantizedLstmParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetOutputGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetOutputGateBias());
return layer;
}
@@ -2405,23 +2405,23 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
// QLstm Basic Parameters
layer->m_BasicParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
layer->m_BasicParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
layer->m_BasicParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
layer->m_BasicParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
layer->m_BasicParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
// QLstm Cifg parameters
if(!descriptor.m_CifgEnabled)
@@ -2443,11 +2443,11 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_CifgParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
layer->m_CifgParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
layer->m_CifgParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
}
// QLstm Projection parameters
@@ -2459,13 +2459,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_ProjectionParameters.m_ProjectionWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
// Projection bias is optional even if projection is enabled
if(params.m_ProjectionWeights != nullptr)
{
layer->m_ProjectionParameters.m_ProjectionBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
}
}
@@ -2491,13 +2491,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_PeepholeParameters.m_CellToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
}
layer->m_PeepholeParameters.m_CellToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
layer->m_PeepholeParameters.m_CellToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
}
// QLstm Layer Normalization params
@@ -2526,15 +2526,15 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_LayerNormParameters.m_InputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
}
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
}
return layer;
}