ArmNN 21.02 — FullyConnected.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "FullyConnected.hpp"
7 
8 #include "RefWorkloadUtils.hpp"
9 
10 namespace armnn
11 {
12 
13 void FullyConnected(const TensorShape& rInputShape,
14  Decoder<float>& rInputDecoder,
15  const TensorShape& rOutputShape,
16  Encoder<float>& rOutputEncoder,
17  const TensorShape& rWeightsShape,
18  Decoder<float>& rWeightDecoder,
19  Decoder<float>& rBiasDecoder,
20  const bool biasEnabled,
21  const unsigned int K,
22  const bool transposeWeights)
23 {
24  // Perform FullyConnected implementation
25  unsigned int outputSize = rOutputShape[1];
26 
27  const std::vector<float> decodedInputs = rInputDecoder.DecodeTensor(rInputShape);
28  const std::vector<float> decodedWeights = rWeightDecoder.DecodeTensor(rWeightsShape);
29 
30  const TensorShape biasShape{outputSize};
31  const std::vector<float> decodedBiases = biasEnabled ? rBiasDecoder.DecodeTensor(biasShape) : std::vector<float>();
32 
33 
34  for (unsigned int n = 0; n < rInputShape[0]; n++)
35  {
36  for (unsigned int channelOutput = 0; channelOutput < outputSize; channelOutput++)
37  {
38  float outval = 0.f;
39 
40  for (unsigned int channelInput = 0; channelInput < K; channelInput++)
41  {
42  float weight;
43  if (transposeWeights)
44  {
45  weight = decodedWeights[channelOutput * K + channelInput];
46  }
47  else
48  {
49  weight = decodedWeights[channelInput * outputSize + channelOutput];
50  }
51 
52  outval += weight * decodedInputs[n * K + channelInput];
53  }
54 
55  if (biasEnabled)
56  {
57  outval += decodedBiases[channelOutput];
58  }
59 
60  rOutputEncoder[n * outputSize + channelOutput];
61  rOutputEncoder.Set(outval);
62  }
63  }
64 }
65 
66 } //namespace armnn
virtual void Set(IType right)=0
Copyright (c) 2021 ARM Limited and Contributors.
void FullyConnected(const TensorShape &rInputShape, Decoder< float > &rInputDecoder, const TensorShape &rOutputShape, Encoder< float > &rOutputEncoder, const TensorShape &rWeightsShape, Decoder< float > &rWeightDecoder, Decoder< float > &rBiasDecoder, const bool biasEnabled, const unsigned int K, const bool transposeWeights)
Performs a matrix multiplication and optionally adds a bias.
virtual std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier=1, bool isDepthwise=false)=0