diff options
author | Keith Davis <keith.davis@arm.com> | 2022-04-07 11:32:00 +0100 |
---|---|---|
committer | Keith Davis <keith.davis@arm.com> | 2022-05-16 16:08:54 +0100 |
commit | b4dd5cc86d4eb841de670f0f102ede599e0d9c40 (patch) | |
tree | 77857cf739baecaf63701b66c1a2646b7930a834 /src/armnn/Network.cpp | |
parent | b86ec6641b4b06ccddad5eebbc21010d6184fe79 (diff) | |
download | armnn-b4dd5cc86d4eb841de670f0f102ede599e0d9c40.tar.gz |
IVGCVSW-6124 ConstTensorsAsInput: Conv2d - FrontEnd
* Updated Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Updated Ref.
* Fixed resulting Neon / CL tests
* Unified optimizers for conv2d ops
* Optimizer Fix - Fp32ToBf16
* Partial implementation for ACL backends to fix VTS failures
!android-nn-driver:7477
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I5fb18877f7ee32643e15a9818945356274bb401b
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r-- | src/armnn/Network.cpp | 83 |
1 file changed, 27 insertions(+), 56 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index d2ebd4cde6..479e57fc56 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -83,35 +83,23 @@ IConnectableLayer* INetwork::AddConcatLayer(const ConcatDescriptor& concatDescri IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional<ConstTensor>& biases, const char* name) { - return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name); + return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, name); } - -IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const char* name) -{ - Optional<ConstTensor> biases; - return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name); -} - - +ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const ConstTensor& biases, - const char* name ) + const ConstTensor& weights, + const Optional<ConstTensor>& biases, + const char* name) { - return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, armnn::Optional<ConstTensor>(biases), name); } - +ARMNN_NO_DEPRECATE_WARN_END IConnectableLayer* INetwork::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor, const char* name) @@ -2012,25 +2000,33 @@ IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDes return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name); } -IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional<ConstTensor>& biases, - const char* name) +IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + 
const char* name) { - if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value()) - { - throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty"); - } - - const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name); + return m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name); +} +IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const Optional<ConstTensor>& biases, + const char* name) +{ + auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name); + // Add a constant layer for weights + ConstantLayer* weightsLayer = m_Graph->AddLayer<ConstantLayer>("Weights"); + weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights); layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights); - - if (convolution2dDescriptor.m_BiasEnabled) + weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo()); + weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1)); + // Add a constant layer for biases + if (biases.has_value() && convolution2dDescriptor.m_BiasEnabled) { + ConstantLayer* biasLayer = m_Graph->AddLayer<ConstantLayer>("Bias"); + biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biases.value()); layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value()); + biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo()); + biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2)); } - return layer; } @@ -2044,31 +2040,6 @@ IConnectableLayer* NetworkImpl::AddConvertFp32ToFp16Layer(const char* name) return m_Graph->AddLayer<ConvertFp32ToFp16Layer>(name); } -IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional<ConstTensor>& biases, - const char* name) -{ - return 
AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name); -} - -IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const char* name) -{ - Optional<ConstTensor> biases; - return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name); -} - -IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const ConstTensor& biases, - const char* name) -{ - Optional<ConstTensor> optionalBiases(biases); - return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name); -} - IConnectableLayer* NetworkImpl::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor, const char* name) { |