author    Sadik Armagan <sadik.armagan@arm.com>  2021-03-25 07:46:55 +0000
committer Sadik Armagan <sadik.armagan@arm.com>  2021-03-25 07:46:55 +0000
commit    f0a6dec75832604d5ab18242dc216852821a8279 (patch)
tree      ff25e64c62c63975a54abd16a8bff744be70d7c0 /src/armnn
parent    16fb1a2d9c1d3d80c0f0b6ab549919fbabd2a0b9 (diff)
download  armnn-f0a6dec75832604d5ab18242dc216852821a8279.tar.gz
IVGCVSW-5736 and IVGCVSW-5743 'NonConstWeights: Update front-end and TfLiteDelegate support for FullyConnected Operator'
* Added front-end support for non-const weights for FULLY_CONNECTED operator
* Added FULLY_CONNECTED end-to-end test
* Updated FULLY_CONNECTED operator support in TfLite Arm NN Delegate for non-const weights
* Updated the version numbers

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iffa5b9aa9297aca4c02d923cce4636c88ac21faa
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/BackendHelper.cpp               26
-rw-r--r--  src/armnn/Descriptors.cpp                 17
-rw-r--r--  src/armnn/Network.cpp                     69
-rw-r--r--  src/armnn/Network.hpp                      7
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp  82
-rw-r--r--  src/armnn/test/OptimizerTests.cpp          8
-rw-r--r--  src/armnn/test/UtilsTests.cpp             12
7 files changed, 171 insertions(+), 50 deletions(-)
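
For context before the per-file hunks: a minimal usage sketch of the new Optional<ConstTensor> overload added by this change, showing how a caller might build a FULLY_CONNECTED layer whose weights and bias arrive as network inputs (m_ConstantWeights == false). This sketch is not part of the commit; the layer names, binding ids and tensor shapes are illustrative assumptions.

// Sketch: FULLY_CONNECTED with non-constant weights supplied via input slots.
#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

armnn::INetworkPtr BuildFullyConnectedWithRuntimeWeights()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    FullyConnectedDescriptor fcDesc;
    fcDesc.m_BiasEnabled     = true;
    fcDesc.m_ConstantWeights = false; // weights/bias are graph inputs, not ConstTensors

    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    IConnectableLayer* weights = network->AddInputLayer(1, "weights");
    IConnectableLayer* bias    = network->AddInputLayer(2, "bias");

    // No ConstTensor payloads are passed when the weights are not constant.
    IConnectableLayer* fc = network->AddFullyConnectedLayer(fcDesc,
                                                            EmptyOptional(),
                                                            EmptyOptional(),
                                                            "fc");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // GetNumViews() gives the layer three input slots here: data, weights, bias.
    input->GetOutputSlot(0).Connect(fc->GetInputSlot(0));
    weights->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
    bias->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
    fc->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Illustrative shapes: weights are [inputSize, outputSize] with the default
    // m_TransposeWeightMatrix == false.
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 8}), DataType::Float32));
    weights->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({8, 4}), DataType::Float32));
    bias->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({4}), DataType::Float32));
    fc->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 4}), DataType::Float32));

    return network;
}

At optimization time such a network only passes IsFullyConnectedSupported on backends that report the NonConstWeights capability, as gated in BackendHelper.cpp below.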
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 1467366323..1c926f4d30 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -23,7 +23,21 @@ LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
auto factoryFunc = backendRegistry.GetFactory(backend);
auto backendObject = factoryFunc();
- return LayerSupportHandle(backendObject->GetLayerSupport());
+ return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
+}
+
+/// Convenience function to check a capability on a backend
+bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability)
+{
+ bool hasCapability = false;
+ auto const& backendRegistry = armnn::BackendRegistryInstance();
+ if (backendRegistry.IsBackendRegistered(backend))
+ {
+ auto factoryFunc = backendRegistry.GetFactory(backend);
+ auto backendObject = factoryFunc();
+ hasCapability = backendObject->HasCapability(capability);
+ }
+ return hasCapability;
}
bool LayerSupportHandle::IsBackendRegistered() const
@@ -293,6 +307,16 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
+ if(!descriptor.m_ConstantWeights && !m_BackendId.IsUndefined())
+ {
+ bool result = false;
+ result = IsCapabilitySupported(m_BackendId, BackendCapability::NonConstWeights);
+ if (!result)
+ {
+ return result;
+ }
+ }
+
return m_LayerSupport->IsFullyConnectedSupported(input,
output,
weights,
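
The IsCapabilitySupported free function introduced above backs the capability gate inside IsFullyConnectedSupported. A quick sketch of querying it directly, assuming the declaration is picked up from armnn/BackendHelper.hpp as in the tests below and that the reference backend is built in:

// Sketch: ask a backend whether it can accept FULLY_CONNECTED weights as inputs.
// Returns false when the backend is not registered or lacks the capability.
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendId.hpp>
#include <iostream>

int main()
{
    armnn::BackendId backend = armnn::Compute::CpuRef;

    const bool nonConstWeights =
        armnn::IsCapabilitySupported(backend, armnn::BackendCapability::NonConstWeights);

    std::cout << backend.Get() << " supports NonConstWeights: "
              << (nonConstWeights ? "yes" : "no") << std::endl;
    return 0;
}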
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 881023e968..706992ccb0 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -425,4 +425,21 @@ int StridedSliceDescriptor::GetStopForAxis(const TensorShape& inputShape,
}
+uint32_t FullyConnectedDescriptor::GetNumViews() const
+{
+ // Return 1 with constant weights, otherwise check if bias is enabled
+ uint32_t numInputs = 1;
+ if (!m_ConstantWeights)
+ {
+ // non-const weights
+ numInputs = 2;
+ if (m_BiasEnabled)
+ {
+ // non-const bias
+ numInputs = 3;
+ }
+ }
+ return numInputs;
+}
+
}
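
The new GetNumViews() helper determines how many input slots the FullyConnectedLayer constructor requests (see FullyConnectedLayer.cpp below). A small sketch of the mapping, following the logic above:

#include <armnn/Descriptors.hpp>
#include <cassert>

void CheckFullyConnectedInputSlotCount()
{
    armnn::FullyConnectedDescriptor desc;

    desc.m_ConstantWeights = true;      // weights owned by the layer
    assert(desc.GetNumViews() == 1);    // data only

    desc.m_ConstantWeights = false;     // weights arrive via input slot 1
    desc.m_BiasEnabled     = false;
    assert(desc.GetNumViews() == 2);    // data + weights

    desc.m_BiasEnabled     = true;      // bias arrives via input slot 2
    assert(desc.GetNumViews() == 3);    // data + weights + bias
}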
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9373a6ac15..18a4d02fca 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -171,21 +171,26 @@ IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
return pNetworkImpl->AddFillLayer(fillDescriptor, name);
}
-
IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name)
{
- return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
+ return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor,
+ armnn::Optional<ConstTensor>(weights),
+ biases,
+ name);
}
IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const char* name)
{
- Optional<ConstTensor> biases;
- return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
+ armnn::Optional<ConstTensor> biases;
+ return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor,
+ armnn::Optional<ConstTensor>(weights),
+ biases,
+ name);
}
IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
@@ -193,8 +198,18 @@ IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescript
const ConstTensor& biases,
const char* name)
{
- return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
- armnn::Optional<ConstTensor>(biases), name);
+ return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor,
+ armnn::Optional<ConstTensor>(weights),
+ armnn::Optional<ConstTensor>(biases),
+ name);
+}
+
+IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const Optional<ConstTensor>& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
}
IConnectableLayer* INetwork::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
@@ -1709,41 +1724,58 @@ IConnectableLayer* NetworkImpl::AddFillLayer(const FillDescriptor& fillDescripto
}
IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
+ const Optional<ConstTensor>& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
{
- if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
+ if (fullyConnectedDescriptor.m_ConstantWeights && !weights.has_value())
{
- throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
+ throw InvalidArgumentException("AddFullyConnectedLayer: weights cannot be empty");
+
+ if (fullyConnectedDescriptor.m_BiasEnabled && !biases.has_value())
+ {
+ throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be empty");
+ }
}
const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
-
- if (fullyConnectedDescriptor.m_BiasEnabled)
+ if (fullyConnectedDescriptor.m_ConstantWeights)
{
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights.value());
+ if (fullyConnectedDescriptor.m_BiasEnabled)
+ {
+ layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.value());
+ }
}
return layer;
}
IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const Optional<ConstTensor>& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
+}
+
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name)
{
- return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
+ Optional<ConstTensor> optionalWeights(weights);
+ return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, optionalWeights, biases, name);
}
IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const char* name)
{
+ Optional<ConstTensor> optionalWeights(weights);
Optional<ConstTensor> biases;
- return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
+ return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, optionalWeights, biases, name);
}
IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
@@ -1751,8 +1783,9 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescr
const ConstTensor& biases,
const char* name)
{
+ Optional<ConstTensor> optionalWeights(weights);
Optional<ConstTensor> optionalBiases(biases);
- return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
+ return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, optionalWeights, optionalBiases, name);
}
IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 8f16be1684..30941ca9e4 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -104,6 +104,11 @@ public:
const char* name = nullptr);
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const Optional<ConstTensor>& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr);
+
+ IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name = nullptr);
@@ -265,7 +270,7 @@ public:
private:
IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
+ const Optional<ConstTensor>& weights,
const Optional<ConstTensor>& biases,
const char* name);
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 0e5e5942de..6d0b57a84c 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -15,24 +15,25 @@ namespace armnn
{
FullyConnectedLayer::FullyConnectedLayer(const FullyConnectedDescriptor& param, const char* name)
- : LayerWithParameters(1, 1, LayerType::FullyConnected, param, name)
+ : LayerWithParameters(param.GetNumViews(), 1, LayerType::FullyConnected, param, name)
{
}
std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
- ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
-
FullyConnectedQueueDescriptor descriptor;
-
- descriptor.m_Weight = m_Weight.get();
- if (m_Param.m_BiasEnabled)
+ if (m_Param.m_ConstantWeights)
{
- ARMNN_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
- descriptor.m_Bias = m_Bias.get();
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+ descriptor.m_Weight = m_Weight.get();
+
+ if (m_Param.m_BiasEnabled)
+ {
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
+ descriptor.m_Bias = m_Bias.get();
+ }
}
-
SetAdditionalInfo(descriptor);
return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
@@ -41,13 +42,15 @@ std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFa
FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<FullyConnectedLayer>(graph, m_Param, GetName());
-
- layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
- if (layer->m_Param.m_BiasEnabled)
+ if (m_Param.m_ConstantWeights)
{
- layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
- }
+ layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
+ if (layer->m_Param.m_BiasEnabled)
+ {
+ layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
+ }
+ }
return std::move(layer);
}
@@ -70,11 +73,20 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- // check if we m_Weight data is not nullptr
- ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+ std::vector<TensorShape> inferredShapes;
+ if (m_Param.m_ConstantWeights)
+ {
+ // check if m_Weight data is not nullptr
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
- auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
- m_Weight->GetTensorInfo().GetShape() });
+ inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ m_Weight->GetTensorInfo().GetShape()});
+ }
+ else
+ {
+ inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()});
+ }
ARMNN_ASSERT(inferredShapes.size() == 1);
ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
@@ -89,27 +101,37 @@ Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
{
- ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true));
+ Optional<ConstTensor> optionalWeightsTensor = EmptyOptional();
Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
-
- if (GetParameters().m_BiasEnabled)
+ if(GetParameters().m_ConstantWeights)
{
- ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->GetConstTensor<void>());
- optionalBiasTensor = Optional<ConstTensor>(biasTensor);
+ ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->GetConstTensor<void>());
+ optionalWeightsTensor = Optional<ConstTensor>(weightsTensor);
+
+ if (GetParameters().m_BiasEnabled)
+ {
+ ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->GetConstTensor<void>());
+ optionalBiasTensor = Optional<ConstTensor>(biasTensor);
+ }
}
-
- visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
+ visitor.VisitFullyConnectedLayer(this,
+ GetParameters(),
+ optionalWeightsTensor.value(),
+ optionalBiasTensor,
+ GetName());
}
void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
{
- std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
-
- if (GetParameters().m_BiasEnabled)
+ std::vector <armnn::ConstTensor> constTensors;
+ if(GetParameters().m_ConstantWeights)
{
- constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+ constTensors.emplace_back(ConstTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true)));
+ if (GetParameters().m_BiasEnabled)
+ {
+ constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+ }
}
-
strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
}
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index fa860abb64..896fdfd68c 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -10,6 +10,7 @@
#include <Network.hpp>
#include <Optimizer.hpp>
+#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/LayerVisitorBase.hpp>
@@ -679,6 +680,13 @@ public:
};
};
+BOOST_AUTO_TEST_CASE(BackendCapabilityTest)
+{
+ BackendId backendId ="MockBackend";
+ // MockBackend does not support the NonConstWeights capability
+ BOOST_CHECK(!armnn::IsCapabilitySupported(backendId, armnn::BackendCapability::NonConstWeights));
+}
+
BOOST_AUTO_TEST_CASE(BackendHintTest)
{
class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 7776a2d3cf..f0198cb9d4 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -278,6 +278,18 @@ BOOST_AUTO_TEST_CASE(LayerSupportHandle)
BOOST_CHECK(layerSupportObject.IsBackendRegistered());
}
+
+BOOST_AUTO_TEST_CASE(IsCapabilitySupported_CpuRef)
+{
+ BOOST_CHECK(armnn::IsCapabilitySupported(armnn::Compute::CpuRef, armnn::BackendCapability::NonConstWeights));
+}
+#endif
+
+#if defined(ARMCOMPUTENEON_ENABLED)
+BOOST_AUTO_TEST_CASE(IsCapabilitySupported_CpuAcc)
+{
+ BOOST_CHECK(!armnn::IsCapabilitySupported(armnn::Compute::CpuAcc, armnn::BackendCapability::NonConstWeights));
+}
#endif
BOOST_AUTO_TEST_SUITE_END()