From 1f58f03d82c482626b1b4673b6c0e25da4338fb5 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Tue, 27 Apr 2021 17:13:27 +0100
Subject: IVGCVSW-5815 Generalise ConstCpuTensorHandle

* Generalises ConstCpuTensorHandle and inherited classes by removing
  'Cpu' from aliases.
* New renamed classes: ConstTensorHandle, TensorHandle, ScopedTensorHandle,
  PassthroughTensorHandle, ConstPassthroughTensorHandle.

Signed-off-by: James Conroy
Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
---
 src/armnn/Network.cpp | 140 +++++++++++++++++++++++++-------------------------
 1 file changed, 70 insertions(+), 70 deletions(-)

(limited to 'src/armnn/Network.cpp')

diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c99690e4a6..b79576c87e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -12,7 +12,7 @@
 #include "BackendSettings.hpp"
 #include "optimizations/All.hpp"
 
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
 #include
 #include
 #include
@@ -671,7 +671,7 @@ LayerT* ConvertBf16ToFp32Weight(Layer* l)
             TensorInfo newInfo(info.GetShape(), DataType::Float32);
             ConstTensor newInput(newInfo, newValues);
-            layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+            layer->m_Weight.reset(new ScopedTensorHandle(newInput));
         }
     }
     return layer;
 }
@@ -1752,10 +1752,10 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedD
 
     if (fullyConnectedDescriptor.m_ConstantWeights)
     {
-        layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights.value());
+        layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights.value());
         if (fullyConnectedDescriptor.m_BiasEnabled)
        {
-            layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+            layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
        }
     }
 
@@ -1816,11 +1816,11 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDes
 
     const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
 
-    layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+    layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
 
     if (convolution2dDescriptor.m_BiasEnabled)
     {
-        layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+        layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
     }
 
     return layer;
@@ -1864,11 +1864,11 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
 
     const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
 
-    layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+    layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
 
     if (convolution2dDescriptor.m_BiasEnabled)
     {
-        layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+        layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
     }
 
     return layer;
@@ -1913,7 +1913,7 @@ IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::Detect
 {
     const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
 
-    layer->m_Anchors = std::make_shared<ScopedCpuTensorHandle>(anchors);
+    layer->m_Anchors = std::make_shared<ScopedTensorHandle>(anchors);
 
     return layer;
 }
@@ -2011,10 +2011,10 @@ IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizat
 {
     const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
 
-    layer->m_Mean = std::make_shared<ScopedCpuTensorHandle>(mean);
-    layer->m_Variance = std::make_shared<ScopedCpuTensorHandle>(variance);
-    layer->m_Beta = std::make_shared<ScopedCpuTensorHandle>(beta);
-    layer->m_Gamma = std::make_shared<ScopedCpuTensorHandle>(gamma);
+    layer->m_Mean = std::make_shared<ScopedTensorHandle>(mean);
+    layer->m_Variance = std::make_shared<ScopedTensorHandle>(variance);
+    layer->m_Beta = std::make_shared<ScopedTensorHandle>(beta);
+    layer->m_Gamma = std::make_shared<ScopedTensorHandle>(gamma);
 
     return layer;
 }
@@ -2071,7 +2071,7 @@ IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const
 {
     auto layer = m_Graph->AddLayer<ConstantLayer>(name);
 
-    layer->m_LayerOutput = std::make_shared<ScopedCpuTensorHandle>(input);
+    layer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(input);
 
     return layer;
 }
@@ -2107,23 +2107,23 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
     //Lstm Basic Parameters
     layer->m_BasicParameters.m_InputToForgetWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
     layer->m_BasicParameters.m_InputToCellWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
     layer->m_BasicParameters.m_InputToOutputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
     layer->m_BasicParameters.m_RecurrentToForgetWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
     layer->m_BasicParameters.m_RecurrentToCellWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
     layer->m_BasicParameters.m_RecurrentToOutputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
     layer->m_BasicParameters.m_ForgetGateBias =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+        std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
     layer->m_BasicParameters.m_CellBias =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+        std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
     layer->m_BasicParameters.m_OutputGateBias =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+        std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
 
     //Lstm Cifg parameters
     if(!descriptor.m_CifgEnabled)
     {
@@ -2145,11 +2145,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
                                            "when CIFG is disabled.");
         }
         layer->m_CifgParameters.m_InputToInputWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
         layer->m_CifgParameters.m_RecurrentToInputWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
         layer->m_CifgParameters.m_InputGateBias =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+            std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
     }
 
     //Lstm projection parameters
@@ -2161,11 +2161,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
                                            "when projection is enabled.");
         }
         layer->m_ProjectionParameters.m_ProjectionWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
 
         if(params.m_ProjectionBias != nullptr)
         {
             layer->m_ProjectionParameters.m_ProjectionBias =
-                std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+                std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
         }
     }
@@ -2181,7 +2181,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
             }
 
             layer->m_PeepholeParameters.m_CellToInputWeights =
-                std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+                std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
         }
 
         if(params.m_CellToForgetWeights == nullptr)
@@ -2196,9 +2196,9 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
         }
 
         layer->m_PeepholeParameters.m_CellToForgetWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
         layer->m_PeepholeParameters.m_CellToOutputWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
     }
 
     //Lstm Layer Normalization params
@@ -2212,7 +2212,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
                                            "when layer normalization is enabled and CIFG disabled.");
         }
         layer->m_LayerNormParameters.m_InputLayerNormWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
     }
 
     if(params.m_ForgetLayerNormWeights == nullptr)
@@ -2231,11 +2231,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
                                            "when layer normalization is enabled.");
         }
         layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
         layer->m_LayerNormParameters.m_CellLayerNormWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
         layer->m_LayerNormParameters.m_OutputLayerNormWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
     }
     return layer;
 }
@@ -2330,11 +2330,11 @@ IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeCo
 
     const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
 
-    layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+    layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
 
     if (descriptor.m_BiasEnabled)
     {
-        layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+        layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
     }
 
     return layer;
@@ -2366,33 +2366,33 @@ IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputPa
 
     // InputToX weights
     layer->m_QuantizedLstmParameters.m_InputToInputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetInputToInputWeights());
     layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetInputToForgetWeights());
     layer->m_QuantizedLstmParameters.m_InputToCellWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetInputToCellWeights());
     layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetInputToOutputWeights());
 
     // RecurrentToX weights
     layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToInputWeights());
     layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToForgetWeights());
     layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToCellWeights());
     layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
+        std::make_shared<ScopedTensorHandle>(params.GetRecurrentToOutputWeights());
 
     // Bias
     layer->m_QuantizedLstmParameters.m_InputGateBias =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetInputGateBias());
+        std::make_shared<ScopedTensorHandle>(params.GetInputGateBias());
     layer->m_QuantizedLstmParameters.m_ForgetGateBias =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetForgetGateBias());
+        std::make_shared<ScopedTensorHandle>(params.GetForgetGateBias());
     layer->m_QuantizedLstmParameters.m_CellBias =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetCellBias());
+        std::make_shared<ScopedTensorHandle>(params.GetCellBias());
     layer->m_QuantizedLstmParameters.m_OutputGateBias =
-        std::make_shared<ScopedCpuTensorHandle>(params.GetOutputGateBias());
+        std::make_shared<ScopedTensorHandle>(params.GetOutputGateBias());
 
     return layer;
 }
@@ -2405,23 +2405,23 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
     // QLstm Basic Parameters
     layer->m_BasicParameters.m_InputToForgetWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
     layer->m_BasicParameters.m_InputToCellWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
     layer->m_BasicParameters.m_InputToOutputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
     layer->m_BasicParameters.m_RecurrentToForgetWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
     layer->m_BasicParameters.m_RecurrentToCellWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
     layer->m_BasicParameters.m_RecurrentToOutputWeights =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+        std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
     layer->m_BasicParameters.m_ForgetGateBias =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+        std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
     layer->m_BasicParameters.m_CellBias =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+        std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
     layer->m_BasicParameters.m_OutputGateBias =
-        std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+        std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
 
     // QLstm Cifg parameters
     if(!descriptor.m_CifgEnabled)
     {
@@ -2443,11 +2443,11 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
         }
 
         layer->m_CifgParameters.m_InputToInputWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
         layer->m_CifgParameters.m_RecurrentToInputWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
         layer->m_CifgParameters.m_InputGateBias =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+            std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
     }
 
     // QLstm Projection parameters
@@ -2459,13 +2459,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
         }
 
         layer->m_ProjectionParameters.m_ProjectionWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
 
         // Projection bias is optional even if projection is enabled
         if(params.m_ProjectionWeights != nullptr)
         {
             layer->m_ProjectionParameters.m_ProjectionBias =
-                std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+                std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
         }
     }
 
@@ -2491,13 +2491,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
             }
 
             layer->m_PeepholeParameters.m_CellToInputWeights =
-                std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+                std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
         }
 
         layer->m_PeepholeParameters.m_CellToForgetWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
         layer->m_PeepholeParameters.m_CellToOutputWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
     }
 
     // QLstm Layer Normalization params
@@ -2526,15 +2526,15 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
             }
 
             layer->m_LayerNormParameters.m_InputLayerNormWeights =
-                std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+                std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
         }
 
         layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
         layer->m_LayerNormParameters.m_CellLayerNormWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
         layer->m_LayerNormParameters.m_OutputLayerNormWeights =
-            std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+            std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
     }
     return layer;
 }
-- 
cgit v1.2.1
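
Note on applying the same rename to downstream code: the patch above is a mechanical substitution, so each call site swaps the old 'Cpu' handle alias for the generalised name and includes the header from its new location. The sketch below illustrates the new spelling of a typical call site; the helper function, its name and its placement are hypothetical (not part of this patch), while ScopedTensorHandle, armnn::ConstTensor and backendsCommon/TensorHandle.hpp are taken from the diff and commit message.

    // Illustrative sketch only: shows the renamed handle type at a call site.
    #include <armnn/Tensor.hpp>                 // armnn::ConstTensor
    #include <backendsCommon/TensorHandle.hpp>  // was backendsCommon/CpuTensorHandle.hpp

    #include <memory>

    namespace
    {
    // Hypothetical helper: wrap constant weights in the renamed handle type.
    std::shared_ptr<armnn::ScopedTensorHandle> MakeWeightHandle(const armnn::ConstTensor& weights)
    {
        // Before this patch: std::make_shared<armnn::ScopedCpuTensorHandle>(weights)
        return std::make_shared<armnn::ScopedTensorHandle>(weights);
    }
    } // anonymous namespace

Code still written against the old names needs the same one-for-one substitution (ConstCpuTensorHandle to ConstTensorHandle, ScopedCpuTensorHandle to ScopedTensorHandle, and so on for the classes listed in the commit message).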