author     Sadik Armagan <sadik.armagan@arm.com>    2022-08-03 11:27:05 +0100
committer  Nikhil Raj <nikhil.raj@arm.com>          2022-08-29 10:12:21 +0100
commit     b016157f1eea1acc6a84308521c0b90543161da4 (patch)
tree       fe228d1014f4fa9a4f74227d0640719d1d92193c /src/armnn/Network.cpp
parent     ee480d2d6538b0192d40a00ed696b30e2587430c (diff)
download   armnn-b016157f1eea1acc6a84308521c0b90543161da4.tar.gz
IVGCVSW-6954 'Arm NN SL Improvements'
* Move the Conv2D and DepthwiseConv2D validation to the Optimization level when the weights and tensors are provided as constant inputs.
* Take the offset and scale values into account when dequantizing from INT8 to FP32.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I1f81f15640395ac041923b10dbe9151159715117
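For reference, the INT8 to FP32 dequantization mentioned above is the standard affine mapping real = scale * (quantized - offset); taking the offset (zero point) into account matters because ignoring it shifts every dequantized value by scale * offset. The sketch below is a minimal illustration of that formula, not the Arm NN implementation; the helper name DequantizeInt8 is hypothetical.

#include <cstdint>
#include <vector>

// Illustrative affine INT8 -> FP32 dequantization (hypothetical helper, not Arm NN code):
// real_value = scale * (quantized_value - offset)
std::vector<float> DequantizeInt8(const std::vector<int8_t>& quantized, float scale, int32_t offset)
{
    std::vector<float> dequantized;
    dequantized.reserve(quantized.size());
    for (int8_t q : quantized)
    {
        // Subtract the zero-point offset before applying the scale.
        dequantized.push_back(scale * (static_cast<int32_t>(q) - offset));
    }
    return dequantized;
}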
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--  src/armnn/Network.cpp  |  20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 22fc0a3ed4..c4869fae04 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -2058,16 +2058,18 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescrip
auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
// Add a constant layer for weights
ConstantLayer* weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
- layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
+ auto weightsTensorHandle = std::make_shared<ScopedTensorHandle>(weights);
+ weightsLayer->m_LayerOutput = weightsTensorHandle;
+ layer->m_Weight = weightsTensorHandle;
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
// Add a constant layer for biases
if (biases.has_value() && convolution2dDescriptor.m_BiasEnabled)
{
ConstantLayer* biasLayer = m_Graph->AddLayer<ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biases.value());
- layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
+ auto biasTensorHandle = std::make_shared<ScopedTensorHandle>(biases.value());
+ biasLayer->m_LayerOutput = biasTensorHandle;
+ layer->m_Bias = biasTensorHandle;
biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
}
@@ -2113,8 +2115,9 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
// Add a constant layer for weights
ConstantLayer* weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
- layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
+ auto weightsTensorHandle = std::make_shared<ScopedTensorHandle>(weights);
+ weightsLayer->m_LayerOutput = weightsTensorHandle;
+ layer->m_Weight = weightsTensorHandle;
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -2123,8 +2126,9 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
if (biases.has_value() && convolution2dDescriptor.m_BiasEnabled)
{
ConstantLayer* biasLayer = m_Graph->AddLayer<ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biases.value());
- layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
+ auto biasTensorHandle = std::make_shared<ScopedTensorHandle>(biases.value());
+ biasLayer->m_LayerOutput = biasTensorHandle;
+ layer->m_Bias = biasTensorHandle;
biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
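Both hunks apply the same refactor: the constant weights or bias tensor is wrapped in a single ScopedTensorHandle that is shared between the ConstantLayer output and the convolution layer's member, instead of constructing two separate handles (and two copies) from the same tensor. A minimal standalone sketch of that pattern follows; the stub types are simplified stand-ins, not the actual Arm NN classes.

#include <memory>

// Simplified stand-ins for the Arm NN types involved (illustrative only).
struct Tensor {};
struct Handle { explicit Handle(const Tensor&) {} };

struct ConstantLayerStub      { std::shared_ptr<Handle> m_LayerOutput; };
struct Convolution2dLayerStub { std::shared_ptr<Handle> m_Weight; };

void AttachWeights(ConstantLayerStub& constant, Convolution2dLayerStub& conv, const Tensor& weights)
{
    // Wrap the constant tensor once...
    auto handle = std::make_shared<Handle>(weights);
    // ...and let both layers reference the same underlying storage,
    // rather than each constructing its own copy.
    constant.m_LayerOutput = handle;
    conv.m_Weight = handle;
}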