ArmNN 20.02
FullyConnectedLayer.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FullyConnectedLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

namespace armnn
{

FullyConnectedLayer::FullyConnectedLayer(const FullyConnectedDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::FullyConnected, param, name)
{
}
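
In application code this layer is normally created through the public INetwork interface rather than by calling this constructor directly. A minimal sketch of that path, assuming Float32 data and illustrative shapes of [2, 512] input, [512, 128] weights and [128] bias (the names, sizes and zero-filled data below are examples, not taken from this file):

#include <armnn/ArmNN.hpp>
#include <vector>

armnn::INetworkPtr BuildExampleNetwork()
{
    armnn::INetworkPtr net = armnn::INetwork::Create();

    armnn::FullyConnectedDescriptor fcDesc;
    fcDesc.m_BiasEnabled           = true;
    fcDesc.m_TransposeWeightMatrix = false;

    // Illustrative constant data: weights are [inputSize, outputSize] when not transposed.
    std::vector<float> weightData(512 * 128, 0.0f);
    std::vector<float> biasData(128, 0.0f);
    armnn::ConstTensor weights(armnn::TensorInfo({512, 128}, armnn::DataType::Float32), weightData);
    armnn::ConstTensor bias(armnn::TensorInfo({128}, armnn::DataType::Float32), biasData);

    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
    armnn::IConnectableLayer* fc     = net->AddFullyConnectedLayer(
        fcDesc, weights, armnn::Optional<armnn::ConstTensor>(bias), "fc1");
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(fc->GetInputSlot(0));
    fc->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({2, 512}, armnn::DataType::Float32));
    fc->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({2, 128}, armnn::DataType::Float32));

    return net;
}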

std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    // On this level constant data should not be released.
    BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");

    FullyConnectedQueueDescriptor descriptor;

    descriptor.m_Weight = m_Weight.get();
    if (m_Param.m_BiasEnabled)
    {
        BOOST_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
        descriptor.m_Bias = m_Bias.get();
    }
    return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
}
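
CreateWorkload() is not called by user code: it runs inside IRuntime::LoadNetwork(), after Optimize() has assigned the layer to a backend, using that backend's IWorkloadFactory. A minimal sketch of that path, continuing from a network such as the one above and assuming the CpuRef backend is available:

#include <armnn/ArmNN.hpp>
#include <utility>

void LoadExampleNetwork(const armnn::INetwork& net)
{
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

    // Select a backend; the chosen backend's IWorkloadFactory is used for each layer.
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(net, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());

    // CreateWorkload() (and factory.CreateFullyConnected()) is invoked in here.
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
}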

FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<FullyConnectedLayer>(graph, m_Param, GetName());

    layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr;
    if (layer->m_Param.m_BiasEnabled)
    {
        layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr;
    }

    return std::move(layer);
}

std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    BOOST_ASSERT(inputShapes.size() == 2);
    const TensorShape& inputShape = inputShapes[0];
    const TensorShape weightShape = inputShapes[1];

    // Output for FC is [batches, weightShape[dimIdx]].
    unsigned int batches = inputShape[0];
    unsigned int dimIdx = m_Param.m_TransposeWeightMatrix ? 0 : 1;

    return std::vector<TensorShape>({ TensorShape({batches, weightShape[dimIdx]}) });
}
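
As a worked example of this rule (shapes assumed, not taken from this file): with m_TransposeWeightMatrix set to false, a [2, 512] input and [512, 128] weights give a [2, 128] output, because dimIdx selects weightShape[1]; with the flag set, the weights would be stored as [128, 512] and dimIdx 0 would still select 128.

#include <armnn/Tensor.hpp>
#include <cassert>

void FullyConnectedShapeRuleExample()
{
    armnn::TensorShape input({2, 512});      // [batches, inputSize]
    armnn::TensorShape weights({512, 128});  // [inputSize, outputSize], m_TransposeWeightMatrix == false

    unsigned int dimIdx = 1;                 // would be 0 if the weight matrix were transposed
    armnn::TensorShape output({input[0], weights[dimIdx]});

    assert(output[0] == 2 && output[1] == 128);
}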

void FullyConnectedLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    // Check that the m_Weight data is not nullptr.
    BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");

    auto inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        m_Weight->GetTensorInfo().GetShape() });

    BOOST_ASSERT(inferredShapes.size() == 1);

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "FullyConnectedLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShapes[0]);
}

Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
{
    return {m_Weight, m_Bias};
}

void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
{
    ConstTensor weightsTensor(m_Weight->GetTensorInfo(), m_Weight->Map(true));
    Optional<ConstTensor> optionalBiasTensor = EmptyOptional();

    if (GetParameters().m_BiasEnabled)
    {
        ConstTensor biasTensor(m_Bias->GetTensorInfo(), m_Bias->GetConstTensor<void>());
        optionalBiasTensor = Optional<ConstTensor>(biasTensor);
    }

    visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}

} // namespace armnn
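
The Accept() override above drives the visitor mechanism: it packages the layer's constant weights and optional bias into ConstTensor objects and calls ILayerVisitor::VisitFullyConnectedLayer(). Below is a minimal sketch of a visitor that receives this callback; LayerVisitorBase and VisitorNoThrowPolicy (which silently ignore the other layer types) are assumed to be available from armnn/LayerVisitorBase.hpp in this release.

#include <armnn/ArmNN.hpp>
#include <armnn/LayerVisitorBase.hpp>
#include <iostream>

class FullyConnectedInspector : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitFullyConnectedLayer(const armnn::IConnectableLayer* /*layer*/,
                                  const armnn::FullyConnectedDescriptor& descriptor,
                                  const armnn::ConstTensor& weights,
                                  const armnn::Optional<armnn::ConstTensor>& biases,
                                  const char* name) override
    {
        // Report what the FullyConnected layer handed to the visitor.
        std::cout << "FullyConnected '" << (name ? name : "") << "': "
                  << weights.GetNumElements() << " weight elements, bias enabled: "
                  << descriptor.m_BiasEnabled << ", bias provided: "
                  << biases.has_value() << "\n";
    }
};

An instance of such a visitor would then be passed to IConnectableLayer::Accept() for a single layer, or to INetwork::Accept() to walk every layer in a network.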