author     Sadik Armagan <sadik.armagan@arm.com>      2022-08-03 11:27:05 +0100
committer  Cathal Corbett <cathal.corbett@arm.com>    2022-08-12 10:14:15 +0100
commit     1e22d965034d8ca7964bd2be095adef67ca287e3 (patch)
tree       55ced2f4a4b3f47374dfb0d5b7a4ff326b0a203c /src/armnn/Network.cpp
parent     560c393829bb24b60715849c26b21c58a0d92c32 (diff)
IVGCVSW-6954 'Arm NN SL Improvements'
* Move the Conv2D and DepthwiseConv2D validation to the Optimization level when the weights and tensors are provided as constant inputs
* Take into account the offset and scale values when doing INT8 to FP32 dequantization

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I1f81f15640395ac041923b10dbe9151159715117
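For context, the INT8 to FP32 dequantization referred to above is the standard affine scheme real = scale * (quantized - offset); dropping the offset (zero point) shifts every dequantized value. A minimal standalone sketch of that arithmetic, using a hypothetical helper rather than the Arm NN implementation:

    #include <cstdint>
    #include <vector>

    // Affine dequantization: real = scale * (quantized - offset).
    // Both the scale and the offset of the quantized tensor must be
    // applied, which is the behaviour the change above describes.
    std::vector<float> DequantizeInt8(const std::vector<int8_t>& quantized,
                                      float scale, int32_t offset)
    {
        std::vector<float> dequantized;
        dequantized.reserve(quantized.size());
        for (int8_t q : quantized)
        {
            dequantized.push_back(scale * (static_cast<int32_t>(q) - offset));
        }
        return dequantized;
    }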
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--  src/armnn/Network.cpp  20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 22fc0a3ed4..c4869fae04 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -2058,16 +2058,18 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescrip
auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
// Add a constant layer for weights
ConstantLayer* weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
- layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
+ auto weightsTensorHandle = std::make_shared<ScopedTensorHandle>(weights);
+ weightsLayer->m_LayerOutput = weightsTensorHandle;
+ layer->m_Weight = weightsTensorHandle;
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
// Add a constant layer for biases
if (biases.has_value() && convolution2dDescriptor.m_BiasEnabled)
{
ConstantLayer* biasLayer = m_Graph->AddLayer<ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biases.value());
- layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
+ auto biasTensorHandle = std::make_shared<ScopedTensorHandle>(biases.value());
+ biasLayer->m_LayerOutput = biasTensorHandle;
+ layer->m_Bias = biasTensorHandle;
biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
}
@@ -2113,8 +2115,9 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
// Add a constant layer for weights
ConstantLayer* weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
- layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
+ auto weightsTensorHandle = std::make_shared<ScopedTensorHandle>(weights);
+ weightsLayer->m_LayerOutput = weightsTensorHandle;
+ layer->m_Weight = weightsTensorHandle;
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -2123,8 +2126,9 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
if (biases.has_value() && convolution2dDescriptor.m_BiasEnabled)
{
ConstantLayer* biasLayer = m_Graph->AddLayer<ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biases.value());
- layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
+ auto biasTensorHandle = std::make_shared<ScopedTensorHandle>(biases.value());
+ biasLayer->m_LayerOutput = biasTensorHandle;
+ layer->m_Bias = biasTensorHandle;
biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
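The common pattern in the hunks above is to construct the ScopedTensorHandle once and let the constant layer's output and the convolution layer's member share the same shared_ptr, instead of building two independent handles from the same ConstTensor. A minimal standalone sketch of that idea, using a hypothetical TensorHandle type rather than the Arm NN classes:

    #include <memory>
    #include <vector>

    // Stand-in for a handle owning constant tensor data (hypothetical type).
    struct TensorHandle
    {
        std::vector<float> data;
    };

    int main()
    {
        std::vector<float> weights(1024, 0.5f);

        // Before: two handles are created, duplicating the constant data.
        auto layerOutput = std::make_shared<TensorHandle>(TensorHandle{weights});
        auto layerWeight = std::make_shared<TensorHandle>(TensorHandle{weights});

        // After: one handle is created and shared by both owners,
        // so there is a single copy of the constant data.
        auto weightsTensorHandle = std::make_shared<TensorHandle>(TensorHandle{weights});
        layerOutput = weightsTensorHandle; // e.g. weightsLayer->m_LayerOutput
        layerWeight = weightsTensorHandle; // e.g. layer->m_Weight

        return (layerOutput == layerWeight) ? 0 : 1;
    }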